In [1]:
import os
from tqdm import tqdm
In [2]:
datapath='C:/Users/Ravi/Desktop/Cadla.ai assignment/find_phone'
In [3]:
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline

photo_loc_mapping={}
input_data=[]
labels=[]


def preprocess_photo(photopath,name):
    img=Image.open(photopath+'/'+name)
    input_data.append(img)
    labels.append(photo_loc_mapping[name])
    #plt.imshow(img)
    #plt.show()
    #img.show()
    return 
    
In [4]:
def get_data(datapath):
    
    # First load txt file
    for f in os.listdir(datapath):
        if f.endswith('.txt'):
            with open(datapath+'/'+f) as txt_file:
                lines=txt_file.readlines()
                #print(lines)
            for line in lines:
                line_data=line.split()
                photo_loc_mapping[line_data[0]]=[float(line_data[1]),float(line_data[2])]
        
        
    for f in os.listdir(datapath):  
        if f.endswith('.jpg'):
            preprocess_photo(datapath,f)
            
            
            
    return 

get_data(datapath)
In [5]:
# visualize input_data
from PIL import ImageDraw
def visualize(img_arr,loc):
    width, height = img_arr.size
    #print(loc, img_arr.size)
    #print("Line 1: ",(0,int(float(loc[1])*height), width,int(float(loc[1])*height))
    #print("Line 2: ",(int(float(loc[0])*width),0,int(float(loc[0])*width),height))
    
    draw = ImageDraw.Draw(img_arr) 
    #print(0,int(float(loc[0])*width), height,int(float(loc[0])*width))
    #print(0,int(float(loc[1])*height), width,int(float(loc[1])*height))
    #print(int(float(loc[0])*width),0,int(float(loc[0])*width),height)
    
    x=int(float(loc[1])*height)
    y=int(float(loc[0])*width)
    r=int(loc[0]*0.05)+30
    #draw.line((0,x, width,x), fill=124,width=1)
    #draw.line((y,0,y,height), fill=124,width=1)
    
    print(x-r, y-r, x+r, y+r)
    draw.ellipse((y-r, x-r, y+r, x+r),outline ='blue')
    
    plt.imshow(img_arr)
    plt.show()

    
for photo,label in zip(input_data,labels):
    visualize(photo,label)
14 376 74 436
112 210 172 270
248 371 308 431
181 68 241 128
174 192 234 252
220 92 280 152
52 332 112 392
245 253 305 313
94 298 154 358
124 84 184 144
131 207 191 267
54 369 114 429
196 369 256 429
158 244 218 304
205 193 265 253
244 182 304 242
133 222 193 282
190 362 250 422
76 347 136 407
227 190 287 250
42 288 102 348
128 229 188 289
116 207 176 267
250 60 310 120
202 379 262 439
254 82 314 142
233 77 293 137
143 200 203 260
24 344 84 404
242 405 302 465
44 190 104 250
263 80 323 140
66 310 126 370
57 63 117 123
60 156 120 216
180 43 240 103
181 354 241 414
115 111 175 171
35 85 95 145
124 276 184 336
88 75 148 135
261 114 321 174
76 254 136 314
134 215 194 275
82 296 142 356
224 139 284 199
156 68 216 128
171 173 231 233
69 371 129 431
52 247 112 307
211 94 271 154
69 276 129 336
88 73 148 133
196 305 256 365
91 43 151 103
76 285 136 345
11 283 71 343
82 24 142 84
73 104 133 164
175 180 235 240
214 381 274 441
186 386 246 446
232 97 292 157
78 268 138 328
168 295 228 355
177 307 237 367
207 60 267 120
235 342 295 402
95 296 155 356
146 28 206 88
113 403 173 463
119 225 179 285
5 62 65 122
7 264 67 324
180 393 240 453
39 384 99 444
223 170 283 230
165 87 225 147
61 236 121 296
-5 134 55 194
214 305 274 365
122 129 182 189
173 403 233 463
20 384 80 444
202 136 262 196
223 143 283 203
87 43 147 103
82 134 142 194
50 45 110 105
242 117 302 177
242 102 302 162
146 369 206 429
241 -8 301 52
14 16 74 76
193 89 253 149
26 28 86 88
50 176 110 236
208 386 268 446
91 305 151 365
144 202 204 262
39 94 99 154
213 352 273 412
201 369 261 429
134 185 194 245
128 339 188 399
116 320 176 380
82 290 142 350
60 345 120 405
253 214 313 274
11 263 71 323
39 256 99 316
202 318 262 378
52 60 112 120
42 85 102 145
63 112 123 172
76 126 136 186
88 102 148 162
63 149 123 209
137 100 197 160
42 217 102 277
199 283 259 343
57 364 117 424
115 136 175 196
72 94 132 154
245 200 305 260
109 340 169 400
232 237 292 297
143 151 203 211
In [6]:
from keras.activations import relu
from keras.callbacks import EarlyStopping,TensorBoard, ModelCheckpoint
from keras.layers import *
from keras.optimizers import adam
from keras.losses import mse
from keras.models import Sequential,Model
from keras.utils import to_categorical
import seaborn as sn
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
%matplotlib inline
from sklearn.utils import shuffle
import numpy as np
Using TensorFlow backend.
In [7]:
#np.array(input_data[0])
In [8]:
from keras.models import Model
from keras.applications import mobilenetv2
from keras.applications.mobilenetv2 import preprocess_input
model_mobile=mobilenetv2.MobileNetV2(input_shape=(224,224,3), include_top=True, weights='imagenet',classes=1000)
WARNING:tensorflow:From C:\Users\Ravi\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
In [9]:
model2=Model(input=model_mobile.input,output=model_mobile.layers[-2].output)
print(model2.summary())
print(model2.output_shape)
C:\Users\Ravi\Anaconda3\lib\site-packages\ipykernel_launcher.py:1: UserWarning: Update your `Model` call to the Keras 2 API: `Model(inputs=Tensor("in..., outputs=Tensor("gl...)`
  """Entry point for launching an IPython kernel.
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 224, 224, 3)  0                                            
__________________________________________________________________________________________________
Conv1_pad (ZeroPadding2D)       (None, 225, 225, 3)  0           input_1[0][0]                    
__________________________________________________________________________________________________
Conv1 (Conv2D)                  (None, 112, 112, 32) 864         Conv1_pad[0][0]                  
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization)   (None, 112, 112, 32) 128         Conv1[0][0]                      
__________________________________________________________________________________________________
Conv1_relu (ReLU)               (None, 112, 112, 32) 0           bn_Conv1[0][0]                   
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 112, 112, 32) 288         Conv1_relu[0][0]                 
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 112, 112, 32) 128         expanded_conv_depthwise[0][0]    
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 112, 112, 32) 0           expanded_conv_depthwise_BN[0][0] 
__________________________________________________________________________________________________
expanded_conv_project (Conv2D)  (None, 112, 112, 16) 512         expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 112, 112, 16) 64          expanded_conv_project[0][0]      
__________________________________________________________________________________________________
block_1_expand (Conv2D)         (None, 112, 112, 96) 1536        expanded_conv_project_BN[0][0]   
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 112, 112, 96) 384         block_1_expand[0][0]             
__________________________________________________________________________________________________
block_1_expand_relu (ReLU)      (None, 112, 112, 96) 0           block_1_expand_BN[0][0]          
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D)     (None, 113, 113, 96) 0           block_1_expand_relu[0][0]        
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 56, 56, 96)   864         block_1_pad[0][0]                
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 56, 56, 96)   384         block_1_depthwise[0][0]          
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU)   (None, 56, 56, 96)   0           block_1_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_1_project (Conv2D)        (None, 56, 56, 24)   2304        block_1_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 56, 56, 24)   96          block_1_project[0][0]            
__________________________________________________________________________________________________
block_2_expand (Conv2D)         (None, 56, 56, 144)  3456        block_1_project_BN[0][0]         
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 56, 56, 144)  576         block_2_expand[0][0]             
__________________________________________________________________________________________________
block_2_expand_relu (ReLU)      (None, 56, 56, 144)  0           block_2_expand_BN[0][0]          
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 56, 56, 144)  1296        block_2_expand_relu[0][0]        
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 56, 56, 144)  576         block_2_depthwise[0][0]          
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU)   (None, 56, 56, 144)  0           block_2_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_2_project (Conv2D)        (None, 56, 56, 24)   3456        block_2_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 56, 56, 24)   96          block_2_project[0][0]            
__________________________________________________________________________________________________
block_2_add (Add)               (None, 56, 56, 24)   0           block_1_project_BN[0][0]         
                                                                 block_2_project_BN[0][0]         
__________________________________________________________________________________________________
block_3_expand (Conv2D)         (None, 56, 56, 144)  3456        block_2_add[0][0]                
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 56, 56, 144)  576         block_3_expand[0][0]             
__________________________________________________________________________________________________
block_3_expand_relu (ReLU)      (None, 56, 56, 144)  0           block_3_expand_BN[0][0]          
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D)     (None, 57, 57, 144)  0           block_3_expand_relu[0][0]        
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 28, 28, 144)  1296        block_3_pad[0][0]                
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 28, 28, 144)  576         block_3_depthwise[0][0]          
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU)   (None, 28, 28, 144)  0           block_3_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_3_project (Conv2D)        (None, 28, 28, 32)   4608        block_3_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 28, 28, 32)   128         block_3_project[0][0]            
__________________________________________________________________________________________________
block_4_expand (Conv2D)         (None, 28, 28, 192)  6144        block_3_project_BN[0][0]         
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 28, 28, 192)  768         block_4_expand[0][0]             
__________________________________________________________________________________________________
block_4_expand_relu (ReLU)      (None, 28, 28, 192)  0           block_4_expand_BN[0][0]          
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 28, 28, 192)  1728        block_4_expand_relu[0][0]        
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 28, 28, 192)  768         block_4_depthwise[0][0]          
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU)   (None, 28, 28, 192)  0           block_4_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_4_project (Conv2D)        (None, 28, 28, 32)   6144        block_4_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 28, 28, 32)   128         block_4_project[0][0]            
__________________________________________________________________________________________________
block_4_add (Add)               (None, 28, 28, 32)   0           block_3_project_BN[0][0]         
                                                                 block_4_project_BN[0][0]         
__________________________________________________________________________________________________
block_5_expand (Conv2D)         (None, 28, 28, 192)  6144        block_4_add[0][0]                
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 28, 28, 192)  768         block_5_expand[0][0]             
__________________________________________________________________________________________________
block_5_expand_relu (ReLU)      (None, 28, 28, 192)  0           block_5_expand_BN[0][0]          
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 28, 28, 192)  1728        block_5_expand_relu[0][0]        
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 28, 28, 192)  768         block_5_depthwise[0][0]          
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU)   (None, 28, 28, 192)  0           block_5_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_5_project (Conv2D)        (None, 28, 28, 32)   6144        block_5_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 28, 28, 32)   128         block_5_project[0][0]            
__________________________________________________________________________________________________
block_5_add (Add)               (None, 28, 28, 32)   0           block_4_add[0][0]                
                                                                 block_5_project_BN[0][0]         
__________________________________________________________________________________________________
block_6_expand (Conv2D)         (None, 28, 28, 192)  6144        block_5_add[0][0]                
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 28, 28, 192)  768         block_6_expand[0][0]             
__________________________________________________________________________________________________
block_6_expand_relu (ReLU)      (None, 28, 28, 192)  0           block_6_expand_BN[0][0]          
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D)     (None, 29, 29, 192)  0           block_6_expand_relu[0][0]        
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 14, 14, 192)  1728        block_6_pad[0][0]                
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 14, 14, 192)  768         block_6_depthwise[0][0]          
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU)   (None, 14, 14, 192)  0           block_6_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_6_project (Conv2D)        (None, 14, 14, 64)   12288       block_6_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 14, 14, 64)   256         block_6_project[0][0]            
__________________________________________________________________________________________________
block_7_expand (Conv2D)         (None, 14, 14, 384)  24576       block_6_project_BN[0][0]         
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 14, 14, 384)  1536        block_7_expand[0][0]             
__________________________________________________________________________________________________
block_7_expand_relu (ReLU)      (None, 14, 14, 384)  0           block_7_expand_BN[0][0]          
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 14, 14, 384)  3456        block_7_expand_relu[0][0]        
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 14, 14, 384)  1536        block_7_depthwise[0][0]          
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU)   (None, 14, 14, 384)  0           block_7_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_7_project (Conv2D)        (None, 14, 14, 64)   24576       block_7_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 14, 14, 64)   256         block_7_project[0][0]            
__________________________________________________________________________________________________
block_7_add (Add)               (None, 14, 14, 64)   0           block_6_project_BN[0][0]         
                                                                 block_7_project_BN[0][0]         
__________________________________________________________________________________________________
block_8_expand (Conv2D)         (None, 14, 14, 384)  24576       block_7_add[0][0]                
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 14, 14, 384)  1536        block_8_expand[0][0]             
__________________________________________________________________________________________________
block_8_expand_relu (ReLU)      (None, 14, 14, 384)  0           block_8_expand_BN[0][0]          
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 14, 14, 384)  3456        block_8_expand_relu[0][0]        
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 14, 14, 384)  1536        block_8_depthwise[0][0]          
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU)   (None, 14, 14, 384)  0           block_8_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_8_project (Conv2D)        (None, 14, 14, 64)   24576       block_8_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 14, 14, 64)   256         block_8_project[0][0]            
__________________________________________________________________________________________________
block_8_add (Add)               (None, 14, 14, 64)   0           block_7_add[0][0]                
                                                                 block_8_project_BN[0][0]         
__________________________________________________________________________________________________
block_9_expand (Conv2D)         (None, 14, 14, 384)  24576       block_8_add[0][0]                
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 14, 14, 384)  1536        block_9_expand[0][0]             
__________________________________________________________________________________________________
block_9_expand_relu (ReLU)      (None, 14, 14, 384)  0           block_9_expand_BN[0][0]          
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 14, 14, 384)  3456        block_9_expand_relu[0][0]        
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 14, 14, 384)  1536        block_9_depthwise[0][0]          
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU)   (None, 14, 14, 384)  0           block_9_depthwise_BN[0][0]       
__________________________________________________________________________________________________
block_9_project (Conv2D)        (None, 14, 14, 64)   24576       block_9_depthwise_relu[0][0]     
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 14, 14, 64)   256         block_9_project[0][0]            
__________________________________________________________________________________________________
block_9_add (Add)               (None, 14, 14, 64)   0           block_8_add[0][0]                
                                                                 block_9_project_BN[0][0]         
__________________________________________________________________________________________________
block_10_expand (Conv2D)        (None, 14, 14, 384)  24576       block_9_add[0][0]                
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 14, 14, 384)  1536        block_10_expand[0][0]            
__________________________________________________________________________________________________
block_10_expand_relu (ReLU)     (None, 14, 14, 384)  0           block_10_expand_BN[0][0]         
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 14, 14, 384)  3456        block_10_expand_relu[0][0]       
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 14, 14, 384)  1536        block_10_depthwise[0][0]         
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU)  (None, 14, 14, 384)  0           block_10_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_10_project (Conv2D)       (None, 14, 14, 96)   36864       block_10_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 14, 14, 96)   384         block_10_project[0][0]           
__________________________________________________________________________________________________
block_11_expand (Conv2D)        (None, 14, 14, 576)  55296       block_10_project_BN[0][0]        
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 14, 14, 576)  2304        block_11_expand[0][0]            
__________________________________________________________________________________________________
block_11_expand_relu (ReLU)     (None, 14, 14, 576)  0           block_11_expand_BN[0][0]         
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 14, 14, 576)  5184        block_11_expand_relu[0][0]       
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 14, 14, 576)  2304        block_11_depthwise[0][0]         
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU)  (None, 14, 14, 576)  0           block_11_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_11_project (Conv2D)       (None, 14, 14, 96)   55296       block_11_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 14, 14, 96)   384         block_11_project[0][0]           
__________________________________________________________________________________________________
block_11_add (Add)              (None, 14, 14, 96)   0           block_10_project_BN[0][0]        
                                                                 block_11_project_BN[0][0]        
__________________________________________________________________________________________________
block_12_expand (Conv2D)        (None, 14, 14, 576)  55296       block_11_add[0][0]               
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 14, 14, 576)  2304        block_12_expand[0][0]            
__________________________________________________________________________________________________
block_12_expand_relu (ReLU)     (None, 14, 14, 576)  0           block_12_expand_BN[0][0]         
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 14, 14, 576)  5184        block_12_expand_relu[0][0]       
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 14, 14, 576)  2304        block_12_depthwise[0][0]         
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU)  (None, 14, 14, 576)  0           block_12_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_12_project (Conv2D)       (None, 14, 14, 96)   55296       block_12_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 14, 14, 96)   384         block_12_project[0][0]           
__________________________________________________________________________________________________
block_12_add (Add)              (None, 14, 14, 96)   0           block_11_add[0][0]               
                                                                 block_12_project_BN[0][0]        
__________________________________________________________________________________________________
block_13_expand (Conv2D)        (None, 14, 14, 576)  55296       block_12_add[0][0]               
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 14, 14, 576)  2304        block_13_expand[0][0]            
__________________________________________________________________________________________________
block_13_expand_relu (ReLU)     (None, 14, 14, 576)  0           block_13_expand_BN[0][0]         
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D)    (None, 15, 15, 576)  0           block_13_expand_relu[0][0]       
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 7, 7, 576)    5184        block_13_pad[0][0]               
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 7, 7, 576)    2304        block_13_depthwise[0][0]         
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU)  (None, 7, 7, 576)    0           block_13_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_13_project (Conv2D)       (None, 7, 7, 160)    92160       block_13_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 7, 7, 160)    640         block_13_project[0][0]           
__________________________________________________________________________________________________
block_14_expand (Conv2D)        (None, 7, 7, 960)    153600      block_13_project_BN[0][0]        
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 7, 7, 960)    3840        block_14_expand[0][0]            
__________________________________________________________________________________________________
block_14_expand_relu (ReLU)     (None, 7, 7, 960)    0           block_14_expand_BN[0][0]         
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 7, 7, 960)    8640        block_14_expand_relu[0][0]       
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 7, 7, 960)    3840        block_14_depthwise[0][0]         
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU)  (None, 7, 7, 960)    0           block_14_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_14_project (Conv2D)       (None, 7, 7, 160)    153600      block_14_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 7, 7, 160)    640         block_14_project[0][0]           
__________________________________________________________________________________________________
block_14_add (Add)              (None, 7, 7, 160)    0           block_13_project_BN[0][0]        
                                                                 block_14_project_BN[0][0]        
__________________________________________________________________________________________________
block_15_expand (Conv2D)        (None, 7, 7, 960)    153600      block_14_add[0][0]               
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 7, 7, 960)    3840        block_15_expand[0][0]            
__________________________________________________________________________________________________
block_15_expand_relu (ReLU)     (None, 7, 7, 960)    0           block_15_expand_BN[0][0]         
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 7, 7, 960)    8640        block_15_expand_relu[0][0]       
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 7, 7, 960)    3840        block_15_depthwise[0][0]         
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU)  (None, 7, 7, 960)    0           block_15_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_15_project (Conv2D)       (None, 7, 7, 160)    153600      block_15_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 7, 7, 160)    640         block_15_project[0][0]           
__________________________________________________________________________________________________
block_15_add (Add)              (None, 7, 7, 160)    0           block_14_add[0][0]               
                                                                 block_15_project_BN[0][0]        
__________________________________________________________________________________________________
block_16_expand (Conv2D)        (None, 7, 7, 960)    153600      block_15_add[0][0]               
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 7, 7, 960)    3840        block_16_expand[0][0]            
__________________________________________________________________________________________________
block_16_expand_relu (ReLU)     (None, 7, 7, 960)    0           block_16_expand_BN[0][0]         
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 7, 7, 960)    8640        block_16_expand_relu[0][0]       
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 7, 7, 960)    3840        block_16_depthwise[0][0]         
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU)  (None, 7, 7, 960)    0           block_16_depthwise_BN[0][0]      
__________________________________________________________________________________________________
block_16_project (Conv2D)       (None, 7, 7, 320)    307200      block_16_depthwise_relu[0][0]    
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 7, 7, 320)    1280        block_16_project[0][0]           
__________________________________________________________________________________________________
Conv_1 (Conv2D)                 (None, 7, 7, 1280)   409600      block_16_project_BN[0][0]        
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization)  (None, 7, 7, 1280)   5120        Conv_1[0][0]                     
__________________________________________________________________________________________________
out_relu (ReLU)                 (None, 7, 7, 1280)   0           Conv_1_bn[0][0]                  
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 1280)         0           out_relu[0][0]                   
==================================================================================================
Total params: 2,257,984
Trainable params: 2,223,872
Non-trainable params: 34,112
__________________________________________________________________________________________________
None
(None, 1280)
In [10]:
from keras.utils import to_categorical
def test_train_dev_split(input_data, output_data, train=0.8, dev=0.1,
                         test=0.1):
    #make seed for exact results everything
    #input_data=preprocess_input(input_data)
    input_data, output_data = shuffle(input_data, output_data, random_state=0)
    split1 = int(train * len(input_data))
    split2 = int((train + dev) * len(input_data))
    train_input = input_data[:split1]
    dev_input = input_data[split1:split2]
    test_input = input_data[split2:]
    

    train_output = output_data[:split1]
    dev_output = output_data[split1:split2]
    test_output = output_data[split2:]
    
    
    train_input=model2.predict(preprocess_input(np.array([np.array(i.resize((224,224))) for i in train_input])))
    dev_input=model2.predict(preprocess_input(np.array([np.array(i.resize((224,224))) for i in dev_input])))
    test_input=model2.predict(preprocess_input(np.array([np.array(i.resize((224,224))) for i in test_input])))
    print(train_input[0])

    return train_input,np.array(train_output),dev_input, np.array(dev_output),test_input, np.array(test_output)
In [11]:
from keras import backend as K 

def euc_dist_keras(y_true, y_pred):
    return K.sqrt(K.sum(K.square(y_true - y_pred), axis=-1, keepdims=True))
In [12]:
def phone_finder_model_dnn(input_data, output_data):

    train_in, train_out, dev_in, dev_out, test_in, test_out = test_train_dev_split(
        input_data, output_data)

    #https://arxiv.org/pdf/1509.05371v2.pdf
    
    print(np.array(train_in).shape)
    model=Sequential()
    model.add(Dense(64,activation='relu',input_shape=(1280,)))
    model.add(Dropout(0.4))
    
    '''model.add(Dense(256,activation='relu'))
    model.add(Dropout(0.4))
    model.add(BatchNormalization())
    
    model.add(Dense(128,activation='relu'))
    model.add(Dropout(0.4))'''
    
    model.add(Dense(8,activation='relu'))
    model.add(Dropout(0.4))
    
    model.add(Dense(2,activation='linear'))
    print(model.input_shape,model.output_shape)
    print(model.summary())

    '''
    model.compile(
        optimizer=adam(0.001),
        loss=[focal_loss(alpha=.25, gamma=2)],
        metrics=['accuracy'])
    
    '''
    model.compile(
        optimizer=adam(0.0001),
        loss='mse',
        metrics=['mae'])
    
    early = EarlyStopping(patience=100)
    
    check = ModelCheckpoint(
        'find_phone_dnn_model.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    model_history=model.fit(
        train_in,
        train_out,
        batch_size=32,
        callbacks=[early, check],
        validation_data=(dev_in, dev_out),
        epochs=5000)

    loss,mse = model.evaluate(test_in, test_out)
    print("Loss: {0}    MAE: {1}".format(loss, mse))
    
    plt.plot(model_history.history['loss'])
    plt.plot(model_history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    

    return model

dnn_model=phone_finder_model_dnn(input_data,labels)
[0.9329858  0.02124948 0.06839955 ... 0.649957   0.3779843  0.26595476]
(102, 1280)
WARNING:tensorflow:From C:\Users\Ravi\Anaconda3\lib\site-packages\keras\backend\tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
(None, 1280) (None, 2)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_1 (Dense)              (None, 64)                81984     
_________________________________________________________________
dropout_1 (Dropout)          (None, 64)                0         
_________________________________________________________________
dense_2 (Dense)              (None, 8)                 520       
_________________________________________________________________
dropout_2 (Dropout)          (None, 8)                 0         
_________________________________________________________________
dense_3 (Dense)              (None, 2)                 18        
=================================================================
Total params: 82,522
Trainable params: 82,522
Non-trainable params: 0
_________________________________________________________________
None
WARNING:tensorflow:From C:\Users\Ravi\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Train on 102 samples, validate on 13 samples
Epoch 1/5000
102/102 [==============================] - ETA: 2s - loss: 1.9813 - mean_absolute_error: 1.037 - 1s 11ms/step - loss: 2.0316 - mean_absolute_error: 1.0411 - val_loss: 0.3749 - val_mean_absolute_error: 0.4678

Epoch 00001: val_loss improved from inf to 0.37492, saving model to find_phone_dnn_model.h5
Epoch 2/5000
102/102 [==============================] - ETA: 0s - loss: 1.2594 - mean_absolute_error: 0.779 - 0s 225us/step - loss: 1.0635 - mean_absolute_error: 0.7977 - val_loss: 0.1786 - val_mean_absolute_error: 0.3445

Epoch 00002: val_loss improved from 0.37492 to 0.17863, saving model to find_phone_dnn_model.h5
Epoch 3/5000
102/102 [==============================] - ETA: 0s - loss: 0.7726 - mean_absolute_error: 0.687 - 0s 196us/step - loss: 0.6690 - mean_absolute_error: 0.6398 - val_loss: 0.1362 - val_mean_absolute_error: 0.2936

Epoch 00003: val_loss improved from 0.17863 to 0.13623, saving model to find_phone_dnn_model.h5
Epoch 4/5000
102/102 [==============================] - ETA: 0s - loss: 0.7700 - mean_absolute_error: 0.709 - 0s 225us/step - loss: 0.6388 - mean_absolute_error: 0.6333 - val_loss: 0.1323 - val_mean_absolute_error: 0.2936

Epoch 00004: val_loss improved from 0.13623 to 0.13229, saving model to find_phone_dnn_model.h5
Epoch 5/5000
102/102 [==============================] - ETA: 0s - loss: 0.3698 - mean_absolute_error: 0.506 - 0s 264us/step - loss: 0.4278 - mean_absolute_error: 0.5194 - val_loss: 0.1334 - val_mean_absolute_error: 0.2998

Epoch 00005: val_loss did not improve from 0.13229
Epoch 6/5000
102/102 [==============================] - ETA: 0s - loss: 0.6113 - mean_absolute_error: 0.640 - 0s 225us/step - loss: 0.4552 - mean_absolute_error: 0.5437 - val_loss: 0.1348 - val_mean_absolute_error: 0.3032

Epoch 00006: val_loss did not improve from 0.13229
Epoch 7/5000
102/102 [==============================] - ETA: 0s - loss: 0.2812 - mean_absolute_error: 0.443 - 0s 254us/step - loss: 0.4097 - mean_absolute_error: 0.5149 - val_loss: 0.1369 - val_mean_absolute_error: 0.3048

Epoch 00007: val_loss did not improve from 0.13229
Epoch 8/5000
102/102 [==============================] - ETA: 0s - loss: 0.3629 - mean_absolute_error: 0.523 - 0s 254us/step - loss: 0.3474 - mean_absolute_error: 0.4921 - val_loss: 0.1405 - val_mean_absolute_error: 0.3074

Epoch 00008: val_loss did not improve from 0.13229
Epoch 9/5000
102/102 [==============================] - ETA: 0s - loss: 0.2477 - mean_absolute_error: 0.403 - 0s 235us/step - loss: 0.2707 - mean_absolute_error: 0.4309 - val_loss: 0.1430 - val_mean_absolute_error: 0.3082

Epoch 00009: val_loss did not improve from 0.13229
Epoch 10/5000
102/102 [==============================] - ETA: 0s - loss: 0.2561 - mean_absolute_error: 0.417 - 0s 244us/step - loss: 0.2894 - mean_absolute_error: 0.4454 - val_loss: 0.1468 - val_mean_absolute_error: 0.3102

Epoch 00010: val_loss did not improve from 0.13229
Epoch 11/5000
102/102 [==============================] - ETA: 0s - loss: 0.2658 - mean_absolute_error: 0.442 - 0s 205us/step - loss: 0.2855 - mean_absolute_error: 0.4425 - val_loss: 0.1512 - val_mean_absolute_error: 0.3143

Epoch 00011: val_loss did not improve from 0.13229
Epoch 12/5000
102/102 [==============================] - ETA: 0s - loss: 0.3395 - mean_absolute_error: 0.496 - 0s 196us/step - loss: 0.3062 - mean_absolute_error: 0.4627 - val_loss: 0.1557 - val_mean_absolute_error: 0.3180

Epoch 00012: val_loss did not improve from 0.13229
Epoch 13/5000
102/102 [==============================] - ETA: 0s - loss: 0.2317 - mean_absolute_error: 0.411 - 0s 225us/step - loss: 0.3019 - mean_absolute_error: 0.4520 - val_loss: 0.1581 - val_mean_absolute_error: 0.3212

Epoch 00013: val_loss did not improve from 0.13229
Epoch 14/5000
102/102 [==============================] - ETA: 0s - loss: 0.2776 - mean_absolute_error: 0.430 - 0s 205us/step - loss: 0.2530 - mean_absolute_error: 0.4171 - val_loss: 0.1591 - val_mean_absolute_error: 0.3242

Epoch 00014: val_loss did not improve from 0.13229
Epoch 15/5000
102/102 [==============================] - ETA: 0s - loss: 0.1936 - mean_absolute_error: 0.358 - 0s 225us/step - loss: 0.2698 - mean_absolute_error: 0.4236 - val_loss: 0.1583 - val_mean_absolute_error: 0.3240

Epoch 00015: val_loss did not improve from 0.13229
Epoch 16/5000
102/102 [==============================] - ETA: 0s - loss: 0.2333 - mean_absolute_error: 0.424 - 0s 215us/step - loss: 0.2837 - mean_absolute_error: 0.4432 - val_loss: 0.1589 - val_mean_absolute_error: 0.3246

Epoch 00016: val_loss did not improve from 0.13229
Epoch 17/5000
102/102 [==============================] - ETA: 0s - loss: 0.1914 - mean_absolute_error: 0.367 - 0s 215us/step - loss: 0.2331 - mean_absolute_error: 0.4065 - val_loss: 0.1619 - val_mean_absolute_error: 0.3283

Epoch 00017: val_loss did not improve from 0.13229
Epoch 18/5000
102/102 [==============================] - ETA: 0s - loss: 0.2536 - mean_absolute_error: 0.410 - 0s 244us/step - loss: 0.2431 - mean_absolute_error: 0.4206 - val_loss: 0.1617 - val_mean_absolute_error: 0.3278

Epoch 00018: val_loss did not improve from 0.13229
Epoch 19/5000
102/102 [==============================] - ETA: 0s - loss: 0.2487 - mean_absolute_error: 0.420 - 0s 215us/step - loss: 0.2402 - mean_absolute_error: 0.4136 - val_loss: 0.1595 - val_mean_absolute_error: 0.3247

Epoch 00019: val_loss did not improve from 0.13229
Epoch 20/5000
102/102 [==============================] - ETA: 0s - loss: 0.2135 - mean_absolute_error: 0.397 - 0s 166us/step - loss: 0.2358 - mean_absolute_error: 0.4071 - val_loss: 0.1553 - val_mean_absolute_error: 0.3185

Epoch 00020: val_loss did not improve from 0.13229
Epoch 21/5000
102/102 [==============================] - ETA: 0s - loss: 0.2595 - mean_absolute_error: 0.431 - 0s 225us/step - loss: 0.2367 - mean_absolute_error: 0.4133 - val_loss: 0.1522 - val_mean_absolute_error: 0.3137

Epoch 00021: val_loss did not improve from 0.13229
Epoch 22/5000
102/102 [==============================] - ETA: 0s - loss: 0.2406 - mean_absolute_error: 0.409 - 0s 215us/step - loss: 0.2805 - mean_absolute_error: 0.4489 - val_loss: 0.1498 - val_mean_absolute_error: 0.3099

Epoch 00022: val_loss did not improve from 0.13229
Epoch 23/5000
102/102 [==============================] - ETA: 0s - loss: 0.2798 - mean_absolute_error: 0.442 - 0s 205us/step - loss: 0.2423 - mean_absolute_error: 0.4175 - val_loss: 0.1491 - val_mean_absolute_error: 0.3088

Epoch 00023: val_loss did not improve from 0.13229
Epoch 24/5000
102/102 [==============================] - ETA: 0s - loss: 0.2854 - mean_absolute_error: 0.448 - 0s 215us/step - loss: 0.2600 - mean_absolute_error: 0.4256 - val_loss: 0.1505 - val_mean_absolute_error: 0.3112

Epoch 00024: val_loss did not improve from 0.13229
Epoch 25/5000
102/102 [==============================] - ETA: 0s - loss: 0.1917 - mean_absolute_error: 0.365 - 0s 215us/step - loss: 0.2347 - mean_absolute_error: 0.4100 - val_loss: 0.1516 - val_mean_absolute_error: 0.3128

Epoch 00025: val_loss did not improve from 0.13229
Epoch 26/5000
102/102 [==============================] - ETA: 0s - loss: 0.2365 - mean_absolute_error: 0.403 - 0s 244us/step - loss: 0.2368 - mean_absolute_error: 0.4135 - val_loss: 0.1547 - val_mean_absolute_error: 0.3172

Epoch 00026: val_loss did not improve from 0.13229
Epoch 27/5000
102/102 [==============================] - ETA: 0s - loss: 0.2558 - mean_absolute_error: 0.432 - 0s 205us/step - loss: 0.2453 - mean_absolute_error: 0.4262 - val_loss: 0.1579 - val_mean_absolute_error: 0.3215

Epoch 00027: val_loss did not improve from 0.13229
Epoch 28/5000
102/102 [==============================] - ETA: 0s - loss: 0.2546 - mean_absolute_error: 0.432 - 0s 205us/step - loss: 0.2727 - mean_absolute_error: 0.4352 - val_loss: 0.1607 - val_mean_absolute_error: 0.3250

Epoch 00028: val_loss did not improve from 0.13229
Epoch 29/5000
102/102 [==============================] - ETA: 0s - loss: 0.2521 - mean_absolute_error: 0.427 - 0s 225us/step - loss: 0.2710 - mean_absolute_error: 0.4398 - val_loss: 0.1602 - val_mean_absolute_error: 0.3240

Epoch 00029: val_loss did not improve from 0.13229
Epoch 30/5000
102/102 [==============================] - ETA: 0s - loss: 0.2245 - mean_absolute_error: 0.403 - 0s 215us/step - loss: 0.2422 - mean_absolute_error: 0.4206 - val_loss: 0.1569 - val_mean_absolute_error: 0.3190

Epoch 00030: val_loss did not improve from 0.13229
Epoch 31/5000
102/102 [==============================] - ETA: 0s - loss: 0.3420 - mean_absolute_error: 0.473 - 0s 196us/step - loss: 0.2537 - mean_absolute_error: 0.4165 - val_loss: 0.1540 - val_mean_absolute_error: 0.3142

Epoch 00031: val_loss did not improve from 0.13229
Epoch 32/5000
102/102 [==============================] - ETA: 0s - loss: 0.1911 - mean_absolute_error: 0.349 - 0s 215us/step - loss: 0.2328 - mean_absolute_error: 0.3993 - val_loss: 0.1519 - val_mean_absolute_error: 0.3108

Epoch 00032: val_loss did not improve from 0.13229
Epoch 33/5000
102/102 [==============================] - ETA: 0s - loss: 0.2076 - mean_absolute_error: 0.392 - 0s 205us/step - loss: 0.2222 - mean_absolute_error: 0.4100 - val_loss: 0.1482 - val_mean_absolute_error: 0.3059

Epoch 00033: val_loss did not improve from 0.13229
Epoch 34/5000
102/102 [==============================] - ETA: 0s - loss: 0.2631 - mean_absolute_error: 0.439 - 0s 225us/step - loss: 0.2456 - mean_absolute_error: 0.4145 - val_loss: 0.1427 - val_mean_absolute_error: 0.2982

Epoch 00034: val_loss did not improve from 0.13229
Epoch 35/5000
102/102 [==============================] - ETA: 0s - loss: 0.2280 - mean_absolute_error: 0.405 - 0s 205us/step - loss: 0.2270 - mean_absolute_error: 0.4051 - val_loss: 0.1387 - val_mean_absolute_error: 0.2920

Epoch 00035: val_loss did not improve from 0.13229
Epoch 36/5000
102/102 [==============================] - ETA: 0s - loss: 0.2318 - mean_absolute_error: 0.411 - 0s 196us/step - loss: 0.2311 - mean_absolute_error: 0.4094 - val_loss: 0.1341 - val_mean_absolute_error: 0.2840

Epoch 00036: val_loss did not improve from 0.13229
Epoch 37/5000
102/102 [==============================] - ETA: 0s - loss: 0.1681 - mean_absolute_error: 0.340 - 0s 225us/step - loss: 0.2312 - mean_absolute_error: 0.3997 - val_loss: 0.1310 - val_mean_absolute_error: 0.2786

Epoch 00037: val_loss improved from 0.13229 to 0.13104, saving model to find_phone_dnn_model.h5
Epoch 38/5000
102/102 [==============================] - ETA: 0s - loss: 0.2252 - mean_absolute_error: 0.395 - 0s 205us/step - loss: 0.2335 - mean_absolute_error: 0.4064 - val_loss: 0.1313 - val_mean_absolute_error: 0.2794

Epoch 00038: val_loss did not improve from 0.13104
Epoch 39/5000
102/102 [==============================] - ETA: 0s - loss: 0.2070 - mean_absolute_error: 0.366 - 0s 215us/step - loss: 0.2172 - mean_absolute_error: 0.3921 - val_loss: 0.1334 - val_mean_absolute_error: 0.2836

Epoch 00039: val_loss did not improve from 0.13104
Epoch 40/5000
102/102 [==============================] - ETA: 0s - loss: 0.2361 - mean_absolute_error: 0.416 - 0s 205us/step - loss: 0.1982 - mean_absolute_error: 0.3760 - val_loss: 0.1347 - val_mean_absolute_error: 0.2861

Epoch 00040: val_loss did not improve from 0.13104
Epoch 41/5000
102/102 [==============================] - ETA: 0s - loss: 0.2445 - mean_absolute_error: 0.414 - 0s 215us/step - loss: 0.2298 - mean_absolute_error: 0.3974 - val_loss: 0.1353 - val_mean_absolute_error: 0.2873

Epoch 00041: val_loss did not improve from 0.13104
Epoch 42/5000
102/102 [==============================] - ETA: 0s - loss: 0.2764 - mean_absolute_error: 0.430 - 0s 186us/step - loss: 0.2291 - mean_absolute_error: 0.4008 - val_loss: 0.1365 - val_mean_absolute_error: 0.2893

Epoch 00042: val_loss did not improve from 0.13104
Epoch 43/5000
102/102 [==============================] - ETA: 0s - loss: 0.2352 - mean_absolute_error: 0.412 - 0s 205us/step - loss: 0.2239 - mean_absolute_error: 0.3921 - val_loss: 0.1390 - val_mean_absolute_error: 0.2935

Epoch 00043: val_loss did not improve from 0.13104
Epoch 44/5000
102/102 [==============================] - ETA: 0s - loss: 0.1643 - mean_absolute_error: 0.343 - 0s 196us/step - loss: 0.2145 - mean_absolute_error: 0.3870 - val_loss: 0.1425 - val_mean_absolute_error: 0.3000

Epoch 00044: val_loss did not improve from 0.13104
Epoch 45/5000
102/102 [==============================] - ETA: 0s - loss: 0.2126 - mean_absolute_error: 0.390 - 0s 225us/step - loss: 0.2122 - mean_absolute_error: 0.3880 - val_loss: 0.1437 - val_mean_absolute_error: 0.3028

Epoch 00045: val_loss did not improve from 0.13104
Epoch 46/5000
102/102 [==============================] - ETA: 0s - loss: 0.2726 - mean_absolute_error: 0.439 - 0s 215us/step - loss: 0.2127 - mean_absolute_error: 0.3875 - val_loss: 0.1458 - val_mean_absolute_error: 0.3068

Epoch 00046: val_loss did not improve from 0.13104
Epoch 47/5000
102/102 [==============================] - ETA: 0s - loss: 0.2447 - mean_absolute_error: 0.408 - 0s 205us/step - loss: 0.2183 - mean_absolute_error: 0.3973 - val_loss: 0.1456 - val_mean_absolute_error: 0.3068

Epoch 00047: val_loss did not improve from 0.13104
Epoch 48/5000
102/102 [==============================] - ETA: 0s - loss: 0.2120 - mean_absolute_error: 0.380 - 0s 205us/step - loss: 0.2222 - mean_absolute_error: 0.3971 - val_loss: 0.1442 - val_mean_absolute_error: 0.3046

Epoch 00048: val_loss did not improve from 0.13104
Epoch 49/5000
102/102 [==============================] - ETA: 0s - loss: 0.2648 - mean_absolute_error: 0.453 - 0s 215us/step - loss: 0.2400 - mean_absolute_error: 0.4150 - val_loss: 0.1437 - val_mean_absolute_error: 0.3034

Epoch 00049: val_loss did not improve from 0.13104
Epoch 50/5000
102/102 [==============================] - ETA: 0s - loss: 0.2319 - mean_absolute_error: 0.400 - 0s 225us/step - loss: 0.2328 - mean_absolute_error: 0.3975 - val_loss: 0.1428 - val_mean_absolute_error: 0.3015

Epoch 00050: val_loss did not improve from 0.13104
Epoch 51/5000
102/102 [==============================] - ETA: 0s - loss: 0.2001 - mean_absolute_error: 0.369 - 0s 215us/step - loss: 0.1851 - mean_absolute_error: 0.3589 - val_loss: 0.1416 - val_mean_absolute_error: 0.2995

Epoch 00051: val_loss did not improve from 0.13104
Epoch 52/5000
102/102 [==============================] - ETA: 0s - loss: 0.1928 - mean_absolute_error: 0.361 - 0s 205us/step - loss: 0.2016 - mean_absolute_error: 0.3709 - val_loss: 0.1400 - val_mean_absolute_error: 0.2970

Epoch 00052: val_loss did not improve from 0.13104
Epoch 53/5000
102/102 [==============================] - ETA: 0s - loss: 0.2071 - mean_absolute_error: 0.385 - 0s 215us/step - loss: 0.1858 - mean_absolute_error: 0.3600 - val_loss: 0.1370 - val_mean_absolute_error: 0.2918

Epoch 00053: val_loss did not improve from 0.13104
Epoch 54/5000
102/102 [==============================] - ETA: 0s - loss: 0.2595 - mean_absolute_error: 0.408 - 0s 225us/step - loss: 0.2170 - mean_absolute_error: 0.3823 - val_loss: 0.1319 - val_mean_absolute_error: 0.2840

Epoch 00054: val_loss did not improve from 0.13104
Epoch 55/5000
102/102 [==============================] - ETA: 0s - loss: 0.2014 - mean_absolute_error: 0.348 - 0s 205us/step - loss: 0.1927 - mean_absolute_error: 0.3562 - val_loss: 0.1276 - val_mean_absolute_error: 0.2781

Epoch 00055: val_loss improved from 0.13104 to 0.12761, saving model to find_phone_dnn_model.h5
Epoch 56/5000
102/102 [==============================] - ETA: 0s - loss: 0.2377 - mean_absolute_error: 0.410 - 0s 205us/step - loss: 0.2169 - mean_absolute_error: 0.3898 - val_loss: 0.1253 - val_mean_absolute_error: 0.2739

Epoch 00056: val_loss improved from 0.12761 to 0.12535, saving model to find_phone_dnn_model.h5
Epoch 57/5000
102/102 [==============================] - ETA: 0s - loss: 0.2361 - mean_absolute_error: 0.400 - 0s 205us/step - loss: 0.2090 - mean_absolute_error: 0.3793 - val_loss: 0.1259 - val_mean_absolute_error: 0.2740

Epoch 00057: val_loss did not improve from 0.12535
Epoch 58/5000
102/102 [==============================] - ETA: 0s - loss: 0.1768 - mean_absolute_error: 0.350 - 0s 195us/step - loss: 0.1969 - mean_absolute_error: 0.3740 - val_loss: 0.1295 - val_mean_absolute_error: 0.2786

Epoch 00058: val_loss did not improve from 0.12535
Epoch 59/5000
102/102 [==============================] - ETA: 0s - loss: 0.1965 - mean_absolute_error: 0.373 - 0s 254us/step - loss: 0.2186 - mean_absolute_error: 0.3982 - val_loss: 0.1352 - val_mean_absolute_error: 0.2866

Epoch 00059: val_loss did not improve from 0.12535
Epoch 60/5000
102/102 [==============================] - ETA: 0s - loss: 0.1976 - mean_absolute_error: 0.395 - 0s 264us/step - loss: 0.2130 - mean_absolute_error: 0.3954 - val_loss: 0.1410 - val_mean_absolute_error: 0.2943

Epoch 00060: val_loss did not improve from 0.12535
Epoch 61/5000
102/102 [==============================] - ETA: 0s - loss: 0.1690 - mean_absolute_error: 0.352 - 0s 235us/step - loss: 0.2113 - mean_absolute_error: 0.3838 - val_loss: 0.1457 - val_mean_absolute_error: 0.3005

Epoch 00061: val_loss did not improve from 0.12535
Epoch 62/5000
102/102 [==============================] - ETA: 0s - loss: 0.1759 - mean_absolute_error: 0.337 - 0s 274us/step - loss: 0.2044 - mean_absolute_error: 0.3669 - val_loss: 0.1470 - val_mean_absolute_error: 0.3023

Epoch 00062: val_loss did not improve from 0.12535
Epoch 63/5000
102/102 [==============================] - ETA: 0s - loss: 0.1660 - mean_absolute_error: 0.334 - 0s 225us/step - loss: 0.1797 - mean_absolute_error: 0.3575 - val_loss: 0.1470 - val_mean_absolute_error: 0.3024

Epoch 00063: val_loss did not improve from 0.12535
Epoch 64/5000
102/102 [==============================] - ETA: 0s - loss: 0.2371 - mean_absolute_error: 0.428 - 0s 225us/step - loss: 0.2110 - mean_absolute_error: 0.3835 - val_loss: 0.1460 - val_mean_absolute_error: 0.3012

Epoch 00064: val_loss did not improve from 0.12535
Epoch 65/5000
102/102 [==============================] - ETA: 0s - loss: 0.2313 - mean_absolute_error: 0.395 - 0s 225us/step - loss: 0.1977 - mean_absolute_error: 0.3631 - val_loss: 0.1450 - val_mean_absolute_error: 0.2996

Epoch 00065: val_loss did not improve from 0.12535
Epoch 66/5000
102/102 [==============================] - ETA: 0s - loss: 0.1675 - mean_absolute_error: 0.319 - 0s 196us/step - loss: 0.1670 - mean_absolute_error: 0.3239 - val_loss: 0.1424 - val_mean_absolute_error: 0.2956

Epoch 00066: val_loss did not improve from 0.12535
Epoch 67/5000
102/102 [==============================] - ETA: 0s - loss: 0.1364 - mean_absolute_error: 0.299 - 0s 215us/step - loss: 0.1852 - mean_absolute_error: 0.3536 - val_loss: 0.1396 - val_mean_absolute_error: 0.2915

Epoch 00067: val_loss did not improve from 0.12535
Epoch 68/5000
102/102 [==============================] - ETA: 0s - loss: 0.1686 - mean_absolute_error: 0.348 - 0s 225us/step - loss: 0.1791 - mean_absolute_error: 0.3500 - val_loss: 0.1373 - val_mean_absolute_error: 0.2882

Epoch 00068: val_loss did not improve from 0.12535
Epoch 69/5000
102/102 [==============================] - ETA: 0s - loss: 0.2022 - mean_absolute_error: 0.376 - 0s 196us/step - loss: 0.1772 - mean_absolute_error: 0.3465 - val_loss: 0.1361 - val_mean_absolute_error: 0.2866

Epoch 00069: val_loss did not improve from 0.12535
Epoch 70/5000
102/102 [==============================] - ETA: 0s - loss: 0.1881 - mean_absolute_error: 0.360 - 0s 235us/step - loss: 0.1753 - mean_absolute_error: 0.3414 - val_loss: 0.1340 - val_mean_absolute_error: 0.2840

Epoch 00070: val_loss did not improve from 0.12535
Epoch 71/5000
102/102 [==============================] - ETA: 0s - loss: 0.1964 - mean_absolute_error: 0.335 - 0s 235us/step - loss: 0.1924 - mean_absolute_error: 0.3512 - val_loss: 0.1353 - val_mean_absolute_error: 0.2867

Epoch 00071: val_loss did not improve from 0.12535
Epoch 72/5000
102/102 [==============================] - ETA: 0s - loss: 0.1383 - mean_absolute_error: 0.294 - 0s 244us/step - loss: 0.1834 - mean_absolute_error: 0.3534 - val_loss: 0.1356 - val_mean_absolute_error: 0.2876

Epoch 00072: val_loss did not improve from 0.12535
Epoch 73/5000
102/102 [==============================] - ETA: 0s - loss: 0.2037 - mean_absolute_error: 0.387 - 0s 196us/step - loss: 0.1920 - mean_absolute_error: 0.3599 - val_loss: 0.1365 - val_mean_absolute_error: 0.2887

Epoch 00073: val_loss did not improve from 0.12535
Epoch 74/5000
102/102 [==============================] - ETA: 0s - loss: 0.1510 - mean_absolute_error: 0.327 - 0s 235us/step - loss: 0.1636 - mean_absolute_error: 0.3301 - val_loss: 0.1369 - val_mean_absolute_error: 0.2894

Epoch 00074: val_loss did not improve from 0.12535
Epoch 75/5000
102/102 [==============================] - ETA: 0s - loss: 0.2080 - mean_absolute_error: 0.380 - 0s 205us/step - loss: 0.1850 - mean_absolute_error: 0.3491 - val_loss: 0.1364 - val_mean_absolute_error: 0.2887

Epoch 00075: val_loss did not improve from 0.12535
Epoch 76/5000
102/102 [==============================] - ETA: 0s - loss: 0.1389 - mean_absolute_error: 0.311 - 0s 196us/step - loss: 0.1828 - mean_absolute_error: 0.3563 - val_loss: 0.1349 - val_mean_absolute_error: 0.2856

Epoch 00076: val_loss did not improve from 0.12535
Epoch 77/5000
102/102 [==============================] - ETA: 0s - loss: 0.2036 - mean_absolute_error: 0.371 - 0s 186us/step - loss: 0.1739 - mean_absolute_error: 0.3360 - val_loss: 0.1343 - val_mean_absolute_error: 0.2840

Epoch 00077: val_loss did not improve from 0.12535
Epoch 78/5000
102/102 [==============================] - ETA: 0s - loss: 0.2012 - mean_absolute_error: 0.375 - 0s 225us/step - loss: 0.1646 - mean_absolute_error: 0.3305 - val_loss: 0.1341 - val_mean_absolute_error: 0.2834

Epoch 00078: val_loss did not improve from 0.12535
Epoch 79/5000
102/102 [==============================] - ETA: 0s - loss: 0.1842 - mean_absolute_error: 0.331 - 0s 215us/step - loss: 0.1721 - mean_absolute_error: 0.3334 - val_loss: 0.1334 - val_mean_absolute_error: 0.2824

Epoch 00079: val_loss did not improve from 0.12535
Epoch 80/5000
102/102 [==============================] - ETA: 0s - loss: 0.1811 - mean_absolute_error: 0.347 - 0s 235us/step - loss: 0.1917 - mean_absolute_error: 0.3579 - val_loss: 0.1349 - val_mean_absolute_error: 0.2848

Epoch 00080: val_loss did not improve from 0.12535
Epoch 81/5000
102/102 [==============================] - ETA: 0s - loss: 0.1903 - mean_absolute_error: 0.379 - 0s 205us/step - loss: 0.1955 - mean_absolute_error: 0.3700 - val_loss: 0.1346 - val_mean_absolute_error: 0.2843

Epoch 00081: val_loss did not improve from 0.12535
Epoch 82/5000
102/102 [==============================] - ETA: 0s - loss: 0.1998 - mean_absolute_error: 0.376 - 0s 244us/step - loss: 0.1798 - mean_absolute_error: 0.3504 - val_loss: 0.1336 - val_mean_absolute_error: 0.2829

Epoch 00082: val_loss did not improve from 0.12535
Epoch 83/5000
102/102 [==============================] - ETA: 0s - loss: 0.1440 - mean_absolute_error: 0.305 - 0s 215us/step - loss: 0.1600 - mean_absolute_error: 0.3282 - val_loss: 0.1318 - val_mean_absolute_error: 0.2796

Epoch 00083: val_loss did not improve from 0.12535
Epoch 84/5000
102/102 [==============================] - ETA: 0s - loss: 0.2073 - mean_absolute_error: 0.379 - 0s 225us/step - loss: 0.1728 - mean_absolute_error: 0.3521 - val_loss: 0.1325 - val_mean_absolute_error: 0.2800

Epoch 00084: val_loss did not improve from 0.12535
Epoch 85/5000
102/102 [==============================] - ETA: 0s - loss: 0.1944 - mean_absolute_error: 0.362 - 0s 235us/step - loss: 0.1795 - mean_absolute_error: 0.3467 - val_loss: 0.1327 - val_mean_absolute_error: 0.2804

Epoch 00085: val_loss did not improve from 0.12535
Epoch 86/5000
102/102 [==============================] - ETA: 0s - loss: 0.1651 - mean_absolute_error: 0.313 - 0s 225us/step - loss: 0.1710 - mean_absolute_error: 0.3324 - val_loss: 0.1317 - val_mean_absolute_error: 0.2789

Epoch 00086: val_loss did not improve from 0.12535
Epoch 87/5000
102/102 [==============================] - ETA: 0s - loss: 0.1648 - mean_absolute_error: 0.348 - 0s 215us/step - loss: 0.1823 - mean_absolute_error: 0.3597 - val_loss: 0.1315 - val_mean_absolute_error: 0.2779

Epoch 00087: val_loss did not improve from 0.12535
Epoch 88/5000
102/102 [==============================] - ETA: 0s - loss: 0.1921 - mean_absolute_error: 0.360 - 0s 196us/step - loss: 0.1883 - mean_absolute_error: 0.3595 - val_loss: 0.1325 - val_mean_absolute_error: 0.2780

Epoch 00088: val_loss did not improve from 0.12535
Epoch 89/5000
102/102 [==============================] - ETA: 0s - loss: 0.1631 - mean_absolute_error: 0.337 - 0s 196us/step - loss: 0.1762 - mean_absolute_error: 0.3503 - val_loss: 0.1346 - val_mean_absolute_error: 0.2805

Epoch 00089: val_loss did not improve from 0.12535
Epoch 90/5000
102/102 [==============================] - ETA: 0s - loss: 0.1480 - mean_absolute_error: 0.316 - 0s 176us/step - loss: 0.1561 - mean_absolute_error: 0.3165 - val_loss: 0.1369 - val_mean_absolute_error: 0.2834

Epoch 00090: val_loss did not improve from 0.12535
Epoch 91/5000
102/102 [==============================] - ETA: 0s - loss: 0.1887 - mean_absolute_error: 0.365 - 0s 225us/step - loss: 0.1866 - mean_absolute_error: 0.3576 - val_loss: 0.1372 - val_mean_absolute_error: 0.2832

Epoch 00091: val_loss did not improve from 0.12535
Epoch 92/5000
102/102 [==============================] - ETA: 0s - loss: 0.2069 - mean_absolute_error: 0.348 - 0s 205us/step - loss: 0.1784 - mean_absolute_error: 0.3411 - val_loss: 0.1369 - val_mean_absolute_error: 0.2829

Epoch 00092: val_loss did not improve from 0.12535
Epoch 93/5000
102/102 [==============================] - ETA: 0s - loss: 0.2118 - mean_absolute_error: 0.389 - 0s 235us/step - loss: 0.1741 - mean_absolute_error: 0.3358 - val_loss: 0.1348 - val_mean_absolute_error: 0.2795

Epoch 00093: val_loss did not improve from 0.12535
Epoch 94/5000
102/102 [==============================] - ETA: 0s - loss: 0.1615 - mean_absolute_error: 0.296 - 0s 205us/step - loss: 0.1666 - mean_absolute_error: 0.3281 - val_loss: 0.1331 - val_mean_absolute_error: 0.2768

Epoch 00094: val_loss did not improve from 0.12535
Epoch 95/5000
102/102 [==============================] - ETA: 0s - loss: 0.1476 - mean_absolute_error: 0.314 - 0s 244us/step - loss: 0.1499 - mean_absolute_error: 0.3107 - val_loss: 0.1316 - val_mean_absolute_error: 0.2750

Epoch 00095: val_loss did not improve from 0.12535
Epoch 96/5000
102/102 [==============================] - ETA: 0s - loss: 0.1429 - mean_absolute_error: 0.314 - 0s 196us/step - loss: 0.1545 - mean_absolute_error: 0.3152 - val_loss: 0.1329 - val_mean_absolute_error: 0.2776

Epoch 00096: val_loss did not improve from 0.12535
Epoch 97/5000
102/102 [==============================] - ETA: 0s - loss: 0.1652 - mean_absolute_error: 0.316 - 0s 205us/step - loss: 0.1642 - mean_absolute_error: 0.3287 - val_loss: 0.1354 - val_mean_absolute_error: 0.2819

Epoch 00097: val_loss did not improve from 0.12535
Epoch 98/5000
102/102 [==============================] - ETA: 0s - loss: 0.1768 - mean_absolute_error: 0.355 - 0s 225us/step - loss: 0.1708 - mean_absolute_error: 0.3342 - val_loss: 0.1376 - val_mean_absolute_error: 0.2855

Epoch 00098: val_loss did not improve from 0.12535
Epoch 99/5000
102/102 [==============================] - ETA: 0s - loss: 0.1938 - mean_absolute_error: 0.366 - 0s 205us/step - loss: 0.1893 - mean_absolute_error: 0.3568 - val_loss: 0.1382 - val_mean_absolute_error: 0.2866

Epoch 00099: val_loss did not improve from 0.12535
Epoch 100/5000
102/102 [==============================] - ETA: 0s - loss: 0.1553 - mean_absolute_error: 0.321 - 0s 244us/step - loss: 0.1622 - mean_absolute_error: 0.3261 - val_loss: 0.1371 - val_mean_absolute_error: 0.2854

Epoch 00100: val_loss did not improve from 0.12535
Epoch 101/5000
102/102 [==============================] - ETA: 0s - loss: 0.2072 - mean_absolute_error: 0.380 - 0s 235us/step - loss: 0.1598 - mean_absolute_error: 0.3324 - val_loss: 0.1346 - val_mean_absolute_error: 0.2821

Epoch 00101: val_loss did not improve from 0.12535
Epoch 102/5000
102/102 [==============================] - ETA: 0s - loss: 0.2149 - mean_absolute_error: 0.375 - 0s 196us/step - loss: 0.1702 - mean_absolute_error: 0.3346 - val_loss: 0.1304 - val_mean_absolute_error: 0.2758

Epoch 00102: val_loss did not improve from 0.12535
Epoch 103/5000
102/102 [==============================] - ETA: 0s - loss: 0.1429 - mean_absolute_error: 0.309 - 0s 225us/step - loss: 0.1409 - mean_absolute_error: 0.2938 - val_loss: 0.1270 - val_mean_absolute_error: 0.2711

Epoch 00103: val_loss did not improve from 0.12535
Epoch 104/5000
102/102 [==============================] - ETA: 0s - loss: 0.1833 - mean_absolute_error: 0.337 - 0s 196us/step - loss: 0.1522 - mean_absolute_error: 0.3145 - val_loss: 0.1231 - val_mean_absolute_error: 0.2655

Epoch 00104: val_loss improved from 0.12535 to 0.12313, saving model to find_phone_dnn_model.h5
Epoch 105/5000
102/102 [==============================] - ETA: 0s - loss: 0.1458 - mean_absolute_error: 0.305 - 0s 186us/step - loss: 0.1477 - mean_absolute_error: 0.3079 - val_loss: 0.1201 - val_mean_absolute_error: 0.2620

Epoch 00105: val_loss improved from 0.12313 to 0.12007, saving model to find_phone_dnn_model.h5
Epoch 106/5000
102/102 [==============================] - ETA: 0s - loss: 0.1250 - mean_absolute_error: 0.278 - 0s 186us/step - loss: 0.1434 - mean_absolute_error: 0.3037 - val_loss: 0.1201 - val_mean_absolute_error: 0.2617

Epoch 00106: val_loss improved from 0.12007 to 0.12006, saving model to find_phone_dnn_model.h5
Epoch 107/5000
102/102 [==============================] - ETA: 0s - loss: 0.1393 - mean_absolute_error: 0.299 - 0s 196us/step - loss: 0.1526 - mean_absolute_error: 0.3171 - val_loss: 0.1210 - val_mean_absolute_error: 0.2627

Epoch 00107: val_loss did not improve from 0.12006
Epoch 108/5000
102/102 [==============================] - ETA: 0s - loss: 0.1221 - mean_absolute_error: 0.284 - 0s 196us/step - loss: 0.1315 - mean_absolute_error: 0.2942 - val_loss: 0.1195 - val_mean_absolute_error: 0.2604

Epoch 00108: val_loss improved from 0.12006 to 0.11953, saving model to find_phone_dnn_model.h5
Epoch 109/5000
102/102 [==============================] - ETA: 0s - loss: 0.1721 - mean_absolute_error: 0.343 - 0s 215us/step - loss: 0.1723 - mean_absolute_error: 0.3383 - val_loss: 0.1188 - val_mean_absolute_error: 0.2594

Epoch 00109: val_loss improved from 0.11953 to 0.11885, saving model to find_phone_dnn_model.h5
Epoch 110/5000
102/102 [==============================] - ETA: 0s - loss: 0.1692 - mean_absolute_error: 0.337 - 0s 205us/step - loss: 0.1805 - mean_absolute_error: 0.3422 - val_loss: 0.1189 - val_mean_absolute_error: 0.2603

Epoch 00110: val_loss did not improve from 0.11885
Epoch 111/5000
102/102 [==============================] - ETA: 0s - loss: 0.1649 - mean_absolute_error: 0.313 - 0s 205us/step - loss: 0.1660 - mean_absolute_error: 0.3258 - val_loss: 0.1195 - val_mean_absolute_error: 0.2616

Epoch 00111: val_loss did not improve from 0.11885
Epoch 112/5000
102/102 [==============================] - ETA: 0s - loss: 0.1726 - mean_absolute_error: 0.344 - 0s 196us/step - loss: 0.1493 - mean_absolute_error: 0.3151 - val_loss: 0.1190 - val_mean_absolute_error: 0.2604

Epoch 00112: val_loss did not improve from 0.11885
Epoch 113/5000
102/102 [==============================] - ETA: 0s - loss: 0.1033 - mean_absolute_error: 0.260 - 0s 206us/step - loss: 0.1348 - mean_absolute_error: 0.2933 - val_loss: 0.1164 - val_mean_absolute_error: 0.2570

Epoch 00113: val_loss improved from 0.11885 to 0.11643, saving model to find_phone_dnn_model.h5
Epoch 114/5000
102/102 [==============================] - ETA: 0s - loss: 0.1308 - mean_absolute_error: 0.300 - 0s 185us/step - loss: 0.1483 - mean_absolute_error: 0.3121 - val_loss: 0.1123 - val_mean_absolute_error: 0.2514

Epoch 00114: val_loss improved from 0.11643 to 0.11228, saving model to find_phone_dnn_model.h5
Epoch 115/5000
102/102 [==============================] - ETA: 0s - loss: 0.1483 - mean_absolute_error: 0.310 - 0s 185us/step - loss: 0.1394 - mean_absolute_error: 0.2996 - val_loss: 0.1094 - val_mean_absolute_error: 0.2467

Epoch 00115: val_loss improved from 0.11228 to 0.10936, saving model to find_phone_dnn_model.h5
Epoch 116/5000
102/102 [==============================] - ETA: 0s - loss: 0.1542 - mean_absolute_error: 0.325 - 0s 196us/step - loss: 0.1343 - mean_absolute_error: 0.2981 - val_loss: 0.1072 - val_mean_absolute_error: 0.2430

Epoch 00116: val_loss improved from 0.10936 to 0.10721, saving model to find_phone_dnn_model.h5
Epoch 117/5000
102/102 [==============================] - ETA: 0s - loss: 0.1476 - mean_absolute_error: 0.314 - 0s 186us/step - loss: 0.1446 - mean_absolute_error: 0.3043 - val_loss: 0.1069 - val_mean_absolute_error: 0.2425

Epoch 00117: val_loss improved from 0.10721 to 0.10685, saving model to find_phone_dnn_model.h5
Epoch 118/5000
102/102 [==============================] - ETA: 0s - loss: 0.1500 - mean_absolute_error: 0.304 - 0s 196us/step - loss: 0.1644 - mean_absolute_error: 0.3196 - val_loss: 0.1081 - val_mean_absolute_error: 0.2445

Epoch 00118: val_loss did not improve from 0.10685
Epoch 119/5000
102/102 [==============================] - ETA: 0s - loss: 0.1502 - mean_absolute_error: 0.318 - 0s 205us/step - loss: 0.1366 - mean_absolute_error: 0.3037 - val_loss: 0.1102 - val_mean_absolute_error: 0.2477

Epoch 00119: val_loss did not improve from 0.10685
Epoch 120/5000
102/102 [==============================] - ETA: 0s - loss: 0.1464 - mean_absolute_error: 0.301 - 0s 186us/step - loss: 0.1449 - mean_absolute_error: 0.3040 - val_loss: 0.1090 - val_mean_absolute_error: 0.2476

Epoch 00120: val_loss did not improve from 0.10685
Epoch 121/5000
102/102 [==============================] - ETA: 0s - loss: 0.1819 - mean_absolute_error: 0.365 - 0s 196us/step - loss: 0.1437 - mean_absolute_error: 0.3138 - val_loss: 0.1102 - val_mean_absolute_error: 0.2497

Epoch 00121: val_loss did not improve from 0.10685
Epoch 122/5000
102/102 [==============================] - ETA: 0s - loss: 0.1234 - mean_absolute_error: 0.280 - 0s 215us/step - loss: 0.1400 - mean_absolute_error: 0.2985 - val_loss: 0.1135 - val_mean_absolute_error: 0.2531

Epoch 00122: val_loss did not improve from 0.10685
Epoch 123/5000
102/102 [==============================] - ETA: 0s - loss: 0.1022 - mean_absolute_error: 0.245 - 0s 215us/step - loss: 0.1254 - mean_absolute_error: 0.2733 - val_loss: 0.1149 - val_mean_absolute_error: 0.2542

Epoch 00123: val_loss did not improve from 0.10685
Epoch 124/5000
102/102 [==============================] - ETA: 0s - loss: 0.1129 - mean_absolute_error: 0.278 - 0s 215us/step - loss: 0.1304 - mean_absolute_error: 0.2919 - val_loss: 0.1174 - val_mean_absolute_error: 0.2561

Epoch 00124: val_loss did not improve from 0.10685
Epoch 125/5000
102/102 [==============================] - ETA: 0s - loss: 0.1727 - mean_absolute_error: 0.331 - 0s 235us/step - loss: 0.1542 - mean_absolute_error: 0.3095 - val_loss: 0.1160 - val_mean_absolute_error: 0.2540

Epoch 00125: val_loss did not improve from 0.10685
Epoch 126/5000
102/102 [==============================] - ETA: 0s - loss: 0.1432 - mean_absolute_error: 0.306 - 0s 225us/step - loss: 0.1373 - mean_absolute_error: 0.2947 - val_loss: 0.1102 - val_mean_absolute_error: 0.2483

Epoch 00126: val_loss did not improve from 0.10685
Epoch 127/5000
102/102 [==============================] - ETA: 0s - loss: 0.1793 - mean_absolute_error: 0.349 - 0s 244us/step - loss: 0.1520 - mean_absolute_error: 0.3152 - val_loss: 0.1047 - val_mean_absolute_error: 0.2428

Epoch 00127: val_loss improved from 0.10685 to 0.10473, saving model to find_phone_dnn_model.h5
Epoch 128/5000
102/102 [==============================] - ETA: 0s - loss: 0.1725 - mean_absolute_error: 0.337 - 0s 196us/step - loss: 0.1626 - mean_absolute_error: 0.3230 - val_loss: 0.1040 - val_mean_absolute_error: 0.2427

Epoch 00128: val_loss improved from 0.10473 to 0.10397, saving model to find_phone_dnn_model.h5
Epoch 129/5000
102/102 [==============================] - ETA: 0s - loss: 0.1234 - mean_absolute_error: 0.279 - 0s 234us/step - loss: 0.1365 - mean_absolute_error: 0.2985 - val_loss: 0.1104 - val_mean_absolute_error: 0.2510

Epoch 00129: val_loss did not improve from 0.10397
Epoch 130/5000
102/102 [==============================] - ETA: 0s - loss: 0.1618 - mean_absolute_error: 0.324 - 0s 186us/step - loss: 0.1603 - mean_absolute_error: 0.3219 - val_loss: 0.1164 - val_mean_absolute_error: 0.2590

Epoch 00130: val_loss did not improve from 0.10397
Epoch 131/5000
102/102 [==============================] - ETA: 0s - loss: 0.1544 - mean_absolute_error: 0.324 - 0s 215us/step - loss: 0.1313 - mean_absolute_error: 0.2926 - val_loss: 0.1197 - val_mean_absolute_error: 0.2630

Epoch 00131: val_loss did not improve from 0.10397
Epoch 132/5000
102/102 [==============================] - ETA: 0s - loss: 0.2112 - mean_absolute_error: 0.359 - 0s 196us/step - loss: 0.1610 - mean_absolute_error: 0.3205 - val_loss: 0.1189 - val_mean_absolute_error: 0.2622

Epoch 00132: val_loss did not improve from 0.10397
Epoch 133/5000
102/102 [==============================] - ETA: 0s - loss: 0.1679 - mean_absolute_error: 0.341 - 0s 215us/step - loss: 0.1517 - mean_absolute_error: 0.3132 - val_loss: 0.1178 - val_mean_absolute_error: 0.2609

Epoch 00133: val_loss did not improve from 0.10397
Epoch 134/5000
102/102 [==============================] - ETA: 0s - loss: 0.1622 - mean_absolute_error: 0.321 - 0s 215us/step - loss: 0.1476 - mean_absolute_error: 0.3034 - val_loss: 0.1163 - val_mean_absolute_error: 0.2600

Epoch 00134: val_loss did not improve from 0.10397
Epoch 135/5000
102/102 [==============================] - ETA: 0s - loss: 0.1231 - mean_absolute_error: 0.284 - 0s 186us/step - loss: 0.1353 - mean_absolute_error: 0.3012 - val_loss: 0.1163 - val_mean_absolute_error: 0.2606

Epoch 00135: val_loss did not improve from 0.10397
Epoch 136/5000
102/102 [==============================] - ETA: 0s - loss: 0.1708 - mean_absolute_error: 0.342 - 0s 186us/step - loss: 0.1567 - mean_absolute_error: 0.3205 - val_loss: 0.1156 - val_mean_absolute_error: 0.2600

Epoch 00136: val_loss did not improve from 0.10397
Epoch 137/5000
102/102 [==============================] - ETA: 0s - loss: 0.1409 - mean_absolute_error: 0.307 - 0s 186us/step - loss: 0.1426 - mean_absolute_error: 0.3060 - val_loss: 0.1143 - val_mean_absolute_error: 0.2585

Epoch 00137: val_loss did not improve from 0.10397
Epoch 138/5000
102/102 [==============================] - ETA: 0s - loss: 0.1492 - mean_absolute_error: 0.306 - 0s 205us/step - loss: 0.1486 - mean_absolute_error: 0.3108 - val_loss: 0.1129 - val_mean_absolute_error: 0.2570

Epoch 00138: val_loss did not improve from 0.10397
Epoch 139/5000
102/102 [==============================] - ETA: 0s - loss: 0.1217 - mean_absolute_error: 0.274 - 0s 205us/step - loss: 0.1413 - mean_absolute_error: 0.3039 - val_loss: 0.1118 - val_mean_absolute_error: 0.2563

Epoch 00139: val_loss did not improve from 0.10397
Epoch 140/5000
102/102 [==============================] - ETA: 0s - loss: 0.1397 - mean_absolute_error: 0.305 - 0s 196us/step - loss: 0.1435 - mean_absolute_error: 0.3047 - val_loss: 0.1115 - val_mean_absolute_error: 0.2559

Epoch 00140: val_loss did not improve from 0.10397
Epoch 141/5000
102/102 [==============================] - ETA: 0s - loss: 0.1018 - mean_absolute_error: 0.257 - 0s 205us/step - loss: 0.1301 - mean_absolute_error: 0.2906 - val_loss: 0.1103 - val_mean_absolute_error: 0.2544

Epoch 00141: val_loss did not improve from 0.10397
Epoch 142/5000
102/102 [==============================] - ETA: 0s - loss: 0.1505 - mean_absolute_error: 0.306 - 0s 215us/step - loss: 0.1294 - mean_absolute_error: 0.2857 - val_loss: 0.1084 - val_mean_absolute_error: 0.2515

Epoch 00142: val_loss did not improve from 0.10397
Epoch 143/5000
102/102 [==============================] - ETA: 0s - loss: 0.1088 - mean_absolute_error: 0.271 - 0s 205us/step - loss: 0.1339 - mean_absolute_error: 0.2923 - val_loss: 0.1085 - val_mean_absolute_error: 0.2503

Epoch 00143: val_loss did not improve from 0.10397
Epoch 144/5000
102/102 [==============================] - ETA: 0s - loss: 0.1431 - mean_absolute_error: 0.305 - 0s 196us/step - loss: 0.1310 - mean_absolute_error: 0.2899 - val_loss: 0.1066 - val_mean_absolute_error: 0.2470

Epoch 00144: val_loss did not improve from 0.10397
Epoch 145/5000
102/102 [==============================] - ETA: 0s - loss: 0.1197 - mean_absolute_error: 0.281 - 0s 215us/step - loss: 0.1320 - mean_absolute_error: 0.2913 - val_loss: 0.1051 - val_mean_absolute_error: 0.2446

Epoch 00145: val_loss did not improve from 0.10397
Epoch 146/5000
102/102 [==============================] - ETA: 0s - loss: 0.1316 - mean_absolute_error: 0.294 - 0s 303us/step - loss: 0.1463 - mean_absolute_error: 0.3064 - val_loss: 0.1036 - val_mean_absolute_error: 0.2429

Epoch 00146: val_loss improved from 0.10397 to 0.10363, saving model to find_phone_dnn_model.h5
Epoch 147/5000
102/102 [==============================] - ETA: 0s - loss: 0.1225 - mean_absolute_error: 0.272 - 0s 225us/step - loss: 0.1258 - mean_absolute_error: 0.2770 - val_loss: 0.1026 - val_mean_absolute_error: 0.2417

Epoch 00147: val_loss improved from 0.10363 to 0.10261, saving model to find_phone_dnn_model.h5
Epoch 148/5000
102/102 [==============================] - ETA: 0s - loss: 0.1538 - mean_absolute_error: 0.319 - 0s 196us/step - loss: 0.1262 - mean_absolute_error: 0.2848 - val_loss: 0.1016 - val_mean_absolute_error: 0.2408

Epoch 00148: val_loss improved from 0.10261 to 0.10159, saving model to find_phone_dnn_model.h5
Epoch 149/5000
102/102 [==============================] - ETA: 0s - loss: 0.1610 - mean_absolute_error: 0.327 - 0s 186us/step - loss: 0.1299 - mean_absolute_error: 0.2828 - val_loss: 0.1008 - val_mean_absolute_error: 0.2411

Epoch 00149: val_loss improved from 0.10159 to 0.10080, saving model to find_phone_dnn_model.h5
Epoch 150/5000
102/102 [==============================] - ETA: 0s - loss: 0.1359 - mean_absolute_error: 0.291 - 0s 215us/step - loss: 0.1414 - mean_absolute_error: 0.3012 - val_loss: 0.0984 - val_mean_absolute_error: 0.2393

Epoch 00150: val_loss improved from 0.10080 to 0.09840, saving model to find_phone_dnn_model.h5
Epoch 151/5000
102/102 [==============================] - ETA: 0s - loss: 0.1428 - mean_absolute_error: 0.296 - 0s 235us/step - loss: 0.1484 - mean_absolute_error: 0.3129 - val_loss: 0.0950 - val_mean_absolute_error: 0.2358

Epoch 00151: val_loss improved from 0.09840 to 0.09497, saving model to find_phone_dnn_model.h5
Epoch 152/5000
102/102 [==============================] - ETA: 0s - loss: 0.1060 - mean_absolute_error: 0.257 - 0s 176us/step - loss: 0.1200 - mean_absolute_error: 0.2826 - val_loss: 0.0929 - val_mean_absolute_error: 0.2330

Epoch 00152: val_loss improved from 0.09497 to 0.09295, saving model to find_phone_dnn_model.h5
Epoch 153/5000
102/102 [==============================] - ETA: 0s - loss: 0.0768 - mean_absolute_error: 0.221 - 0s 176us/step - loss: 0.1102 - mean_absolute_error: 0.2617 - val_loss: 0.0956 - val_mean_absolute_error: 0.2358

Epoch 00153: val_loss did not improve from 0.09295
Epoch 154/5000
102/102 [==============================] - ETA: 0s - loss: 0.1129 - mean_absolute_error: 0.260 - 0s 186us/step - loss: 0.1189 - mean_absolute_error: 0.2737 - val_loss: 0.1006 - val_mean_absolute_error: 0.2413

Epoch 00154: val_loss did not improve from 0.09295
Epoch 155/5000
102/102 [==============================] - ETA: 0s - loss: 0.1300 - mean_absolute_error: 0.290 - 0s 186us/step - loss: 0.1346 - mean_absolute_error: 0.2918 - val_loss: 0.1070 - val_mean_absolute_error: 0.2479

Epoch 00155: val_loss did not improve from 0.09295
Epoch 156/5000
102/102 [==============================] - ETA: 0s - loss: 0.1462 - mean_absolute_error: 0.311 - 0s 205us/step - loss: 0.1322 - mean_absolute_error: 0.2927 - val_loss: 0.1086 - val_mean_absolute_error: 0.2487

Epoch 00156: val_loss did not improve from 0.09295
Epoch 157/5000
102/102 [==============================] - ETA: 0s - loss: 0.1175 - mean_absolute_error: 0.270 - 0s 205us/step - loss: 0.1185 - mean_absolute_error: 0.2715 - val_loss: 0.1089 - val_mean_absolute_error: 0.2485

Epoch 00157: val_loss did not improve from 0.09295
Epoch 158/5000
102/102 [==============================] - ETA: 0s - loss: 0.1614 - mean_absolute_error: 0.322 - 0s 196us/step - loss: 0.1501 - mean_absolute_error: 0.3103 - val_loss: 0.1088 - val_mean_absolute_error: 0.2475

Epoch 00158: val_loss did not improve from 0.09295
Epoch 159/5000
102/102 [==============================] - ETA: 0s - loss: 0.1499 - mean_absolute_error: 0.317 - 0s 205us/step - loss: 0.1366 - mean_absolute_error: 0.2957 - val_loss: 0.1065 - val_mean_absolute_error: 0.2442

Epoch 00159: val_loss did not improve from 0.09295
Epoch 160/5000
102/102 [==============================] - ETA: 0s - loss: 0.1213 - mean_absolute_error: 0.284 - 0s 215us/step - loss: 0.1142 - mean_absolute_error: 0.2662 - val_loss: 0.1023 - val_mean_absolute_error: 0.2389

Epoch 00160: val_loss did not improve from 0.09295
Epoch 161/5000
102/102 [==============================] - ETA: 0s - loss: 0.1340 - mean_absolute_error: 0.314 - 0s 205us/step - loss: 0.1167 - mean_absolute_error: 0.2719 - val_loss: 0.1008 - val_mean_absolute_error: 0.2375

Epoch 00161: val_loss did not improve from 0.09295
Epoch 162/5000
102/102 [==============================] - ETA: 0s - loss: 0.0955 - mean_absolute_error: 0.237 - 0s 196us/step - loss: 0.1000 - mean_absolute_error: 0.2443 - val_loss: 0.1011 - val_mean_absolute_error: 0.2379

Epoch 00162: val_loss did not improve from 0.09295
Epoch 163/5000
102/102 [==============================] - ETA: 0s - loss: 0.1432 - mean_absolute_error: 0.299 - 0s 215us/step - loss: 0.1302 - mean_absolute_error: 0.2953 - val_loss: 0.1023 - val_mean_absolute_error: 0.2397

Epoch 00163: val_loss did not improve from 0.09295
Epoch 164/5000
102/102 [==============================] - ETA: 0s - loss: 0.1058 - mean_absolute_error: 0.263 - 0s 196us/step - loss: 0.1269 - mean_absolute_error: 0.2853 - val_loss: 0.1037 - val_mean_absolute_error: 0.2417

Epoch 00164: val_loss did not improve from 0.09295
Epoch 165/5000
102/102 [==============================] - ETA: 0s - loss: 0.1474 - mean_absolute_error: 0.304 - 0s 205us/step - loss: 0.1306 - mean_absolute_error: 0.2887 - val_loss: 0.1050 - val_mean_absolute_error: 0.2439

Epoch 00165: val_loss did not improve from 0.09295
Epoch 166/5000
102/102 [==============================] - ETA: 0s - loss: 0.1324 - mean_absolute_error: 0.302 - 0s 196us/step - loss: 0.1198 - mean_absolute_error: 0.2729 - val_loss: 0.1050 - val_mean_absolute_error: 0.2446

Epoch 00166: val_loss did not improve from 0.09295
Epoch 167/5000
102/102 [==============================] - ETA: 0s - loss: 0.0856 - mean_absolute_error: 0.225 - 0s 205us/step - loss: 0.1072 - mean_absolute_error: 0.2541 - val_loss: 0.1030 - val_mean_absolute_error: 0.2428

Epoch 00167: val_loss did not improve from 0.09295
Epoch 168/5000
102/102 [==============================] - ETA: 0s - loss: 0.1825 - mean_absolute_error: 0.353 - 0s 196us/step - loss: 0.1257 - mean_absolute_error: 0.2832 - val_loss: 0.1042 - val_mean_absolute_error: 0.2439

Epoch 00168: val_loss did not improve from 0.09295
Epoch 169/5000
102/102 [==============================] - ETA: 0s - loss: 0.0956 - mean_absolute_error: 0.243 - 0s 205us/step - loss: 0.1130 - mean_absolute_error: 0.2632 - val_loss: 0.1050 - val_mean_absolute_error: 0.2440

Epoch 00169: val_loss did not improve from 0.09295
Epoch 170/5000
102/102 [==============================] - ETA: 0s - loss: 0.1744 - mean_absolute_error: 0.340 - 0s 205us/step - loss: 0.1319 - mean_absolute_error: 0.2920 - val_loss: 0.1066 - val_mean_absolute_error: 0.2453

Epoch 00170: val_loss did not improve from 0.09295
Epoch 171/5000
102/102 [==============================] - ETA: 0s - loss: 0.1146 - mean_absolute_error: 0.269 - 0s 254us/step - loss: 0.1231 - mean_absolute_error: 0.2770 - val_loss: 0.1073 - val_mean_absolute_error: 0.2461

Epoch 00171: val_loss did not improve from 0.09295
Epoch 172/5000
102/102 [==============================] - ETA: 0s - loss: 0.1423 - mean_absolute_error: 0.301 - 0s 225us/step - loss: 0.1476 - mean_absolute_error: 0.3101 - val_loss: 0.1075 - val_mean_absolute_error: 0.2467

Epoch 00172: val_loss did not improve from 0.09295
Epoch 173/5000
102/102 [==============================] - ETA: 0s - loss: 0.1570 - mean_absolute_error: 0.304 - 0s 186us/step - loss: 0.1374 - mean_absolute_error: 0.2960 - val_loss: 0.1071 - val_mean_absolute_error: 0.2470

Epoch 00173: val_loss did not improve from 0.09295
Epoch 174/5000
102/102 [==============================] - ETA: 0s - loss: 0.1520 - mean_absolute_error: 0.320 - 0s 205us/step - loss: 0.1431 - mean_absolute_error: 0.3023 - val_loss: 0.1064 - val_mean_absolute_error: 0.2468

Epoch 00174: val_loss did not improve from 0.09295
Epoch 175/5000
102/102 [==============================] - ETA: 0s - loss: 0.1224 - mean_absolute_error: 0.286 - 0s 205us/step - loss: 0.1375 - mean_absolute_error: 0.2999 - val_loss: 0.1049 - val_mean_absolute_error: 0.2448

Epoch 00175: val_loss did not improve from 0.09295
Epoch 176/5000
102/102 [==============================] - ETA: 0s - loss: 0.1303 - mean_absolute_error: 0.281 - 0s 225us/step - loss: 0.1324 - mean_absolute_error: 0.2990 - val_loss: 0.1034 - val_mean_absolute_error: 0.2425

Epoch 00176: val_loss did not improve from 0.09295
Epoch 177/5000
102/102 [==============================] - ETA: 0s - loss: 0.0973 - mean_absolute_error: 0.237 - 0s 196us/step - loss: 0.1136 - mean_absolute_error: 0.2649 - val_loss: 0.1017 - val_mean_absolute_error: 0.2397

Epoch 00177: val_loss did not improve from 0.09295
Epoch 178/5000
102/102 [==============================] - ETA: 0s - loss: 0.1210 - mean_absolute_error: 0.287 - 0s 215us/step - loss: 0.1185 - mean_absolute_error: 0.2709 - val_loss: 0.1003 - val_mean_absolute_error: 0.2376

Epoch 00178: val_loss did not improve from 0.09295
Epoch 179/5000
102/102 [==============================] - ETA: 0s - loss: 0.1149 - mean_absolute_error: 0.263 - 0s 215us/step - loss: 0.1216 - mean_absolute_error: 0.2761 - val_loss: 0.1002 - val_mean_absolute_error: 0.2370

Epoch 00179: val_loss did not improve from 0.09295
Epoch 180/5000
102/102 [==============================] - ETA: 0s - loss: 0.1120 - mean_absolute_error: 0.260 - 0s 186us/step - loss: 0.1073 - mean_absolute_error: 0.2582 - val_loss: 0.1035 - val_mean_absolute_error: 0.2405

Epoch 00180: val_loss did not improve from 0.09295
Epoch 181/5000
102/102 [==============================] - ETA: 0s - loss: 0.0831 - mean_absolute_error: 0.240 - 0s 254us/step - loss: 0.1274 - mean_absolute_error: 0.2905 - val_loss: 0.1054 - val_mean_absolute_error: 0.2426

Epoch 00181: val_loss did not improve from 0.09295
Epoch 182/5000
102/102 [==============================] - ETA: 0s - loss: 0.1049 - mean_absolute_error: 0.246 - 0s 186us/step - loss: 0.1151 - mean_absolute_error: 0.2609 - val_loss: 0.1063 - val_mean_absolute_error: 0.2440

Epoch 00182: val_loss did not improve from 0.09295
Epoch 183/5000
102/102 [==============================] - ETA: 0s - loss: 0.1175 - mean_absolute_error: 0.274 - 0s 215us/step - loss: 0.1174 - mean_absolute_error: 0.2753 - val_loss: 0.1072 - val_mean_absolute_error: 0.2445

Epoch 00183: val_loss did not improve from 0.09295
Epoch 184/5000
102/102 [==============================] - ETA: 0s - loss: 0.1092 - mean_absolute_error: 0.263 - 0s 215us/step - loss: 0.1296 - mean_absolute_error: 0.2864 - val_loss: 0.1071 - val_mean_absolute_error: 0.2437

Epoch 00184: val_loss did not improve from 0.09295
Epoch 185/5000
102/102 [==============================] - ETA: 0s - loss: 0.1429 - mean_absolute_error: 0.301 - 0s 195us/step - loss: 0.1240 - mean_absolute_error: 0.2760 - val_loss: 0.1059 - val_mean_absolute_error: 0.2425

Epoch 00185: val_loss did not improve from 0.09295
Epoch 186/5000
102/102 [==============================] - ETA: 0s - loss: 0.1461 - mean_absolute_error: 0.303 - 0s 225us/step - loss: 0.1354 - mean_absolute_error: 0.2941 - val_loss: 0.1054 - val_mean_absolute_error: 0.2424

Epoch 00186: val_loss did not improve from 0.09295
Epoch 187/5000
102/102 [==============================] - ETA: 0s - loss: 0.1372 - mean_absolute_error: 0.294 - 0s 205us/step - loss: 0.1336 - mean_absolute_error: 0.2912 - val_loss: 0.1064 - val_mean_absolute_error: 0.2444

Epoch 00187: val_loss did not improve from 0.09295
Epoch 188/5000
102/102 [==============================] - ETA: 0s - loss: 0.1123 - mean_absolute_error: 0.266 - 0s 205us/step - loss: 0.1236 - mean_absolute_error: 0.2826 - val_loss: 0.1060 - val_mean_absolute_error: 0.2447

Epoch 00188: val_loss did not improve from 0.09295
Epoch 189/5000
102/102 [==============================] - ETA: 0s - loss: 0.1185 - mean_absolute_error: 0.272 - 0s 196us/step - loss: 0.1157 - mean_absolute_error: 0.2664 - val_loss: 0.1062 - val_mean_absolute_error: 0.2456

Epoch 00189: val_loss did not improve from 0.09295
Epoch 190/5000
102/102 [==============================] - ETA: 0s - loss: 0.0882 - mean_absolute_error: 0.236 - 0s 215us/step - loss: 0.1213 - mean_absolute_error: 0.2728 - val_loss: 0.1074 - val_mean_absolute_error: 0.2474

Epoch 00190: val_loss did not improve from 0.09295
Epoch 191/5000
102/102 [==============================] - ETA: 0s - loss: 0.1502 - mean_absolute_error: 0.316 - 0s 205us/step - loss: 0.1197 - mean_absolute_error: 0.2738 - val_loss: 0.1070 - val_mean_absolute_error: 0.2467

Epoch 00191: val_loss did not improve from 0.09295
Epoch 192/5000
102/102 [==============================] - ETA: 0s - loss: 0.0856 - mean_absolute_error: 0.222 - 0s 196us/step - loss: 0.1070 - mean_absolute_error: 0.2550 - val_loss: 0.1033 - val_mean_absolute_error: 0.2421

Epoch 00192: val_loss did not improve from 0.09295
Epoch 193/5000
102/102 [==============================] - ETA: 0s - loss: 0.1067 - mean_absolute_error: 0.256 - 0s 205us/step - loss: 0.1137 - mean_absolute_error: 0.2720 - val_loss: 0.1006 - val_mean_absolute_error: 0.2386

Epoch 00193: val_loss did not improve from 0.09295
Epoch 194/5000
102/102 [==============================] - ETA: 0s - loss: 0.1075 - mean_absolute_error: 0.259 - 0s 196us/step - loss: 0.1240 - mean_absolute_error: 0.2850 - val_loss: 0.0996 - val_mean_absolute_error: 0.2381

Epoch 00194: val_loss did not improve from 0.09295
Epoch 195/5000
102/102 [==============================] - ETA: 0s - loss: 0.1122 - mean_absolute_error: 0.261 - 0s 166us/step - loss: 0.1142 - mean_absolute_error: 0.2638 - val_loss: 0.0992 - val_mean_absolute_error: 0.2382

Epoch 00195: val_loss did not improve from 0.09295
Epoch 196/5000
102/102 [==============================] - ETA: 0s - loss: 0.0752 - mean_absolute_error: 0.222 - 0s 176us/step - loss: 0.0945 - mean_absolute_error: 0.2468 - val_loss: 0.0991 - val_mean_absolute_error: 0.2380

Epoch 00196: val_loss did not improve from 0.09295
Epoch 197/5000
102/102 [==============================] - ETA: 0s - loss: 0.0939 - mean_absolute_error: 0.246 - 0s 205us/step - loss: 0.1153 - mean_absolute_error: 0.2699 - val_loss: 0.0995 - val_mean_absolute_error: 0.2382

Epoch 00197: val_loss did not improve from 0.09295
Epoch 198/5000
102/102 [==============================] - ETA: 0s - loss: 0.0861 - mean_absolute_error: 0.218 - 0s 205us/step - loss: 0.1096 - mean_absolute_error: 0.2592 - val_loss: 0.1005 - val_mean_absolute_error: 0.2386

Epoch 00198: val_loss did not improve from 0.09295
Epoch 199/5000
102/102 [==============================] - ETA: 0s - loss: 0.1190 - mean_absolute_error: 0.283 - 0s 205us/step - loss: 0.1044 - mean_absolute_error: 0.2547 - val_loss: 0.1015 - val_mean_absolute_error: 0.2389

Epoch 00199: val_loss did not improve from 0.09295
Epoch 200/5000
102/102 [==============================] - ETA: 0s - loss: 0.1160 - mean_absolute_error: 0.263 - 0s 186us/step - loss: 0.1042 - mean_absolute_error: 0.2480 - val_loss: 0.1015 - val_mean_absolute_error: 0.2385

Epoch 00200: val_loss did not improve from 0.09295
Epoch 201/5000
102/102 [==============================] - ETA: 0s - loss: 0.0824 - mean_absolute_error: 0.226 - 0s 244us/step - loss: 0.0957 - mean_absolute_error: 0.2373 - val_loss: 0.1018 - val_mean_absolute_error: 0.2383

Epoch 00201: val_loss did not improve from 0.09295
Epoch 202/5000
102/102 [==============================] - ETA: 0s - loss: 0.1137 - mean_absolute_error: 0.254 - 0s 215us/step - loss: 0.1135 - mean_absolute_error: 0.2636 - val_loss: 0.1012 - val_mean_absolute_error: 0.2369

Epoch 00202: val_loss did not improve from 0.09295
Epoch 203/5000
102/102 [==============================] - ETA: 0s - loss: 0.0972 - mean_absolute_error: 0.247 - 0s 215us/step - loss: 0.1017 - mean_absolute_error: 0.2483 - val_loss: 0.1007 - val_mean_absolute_error: 0.2360

Epoch 00203: val_loss did not improve from 0.09295
Epoch 204/5000
102/102 [==============================] - ETA: 0s - loss: 0.0981 - mean_absolute_error: 0.243 - 0s 196us/step - loss: 0.1066 - mean_absolute_error: 0.2534 - val_loss: 0.1010 - val_mean_absolute_error: 0.2356

Epoch 00204: val_loss did not improve from 0.09295
Epoch 205/5000
102/102 [==============================] - ETA: 0s - loss: 0.1517 - mean_absolute_error: 0.306 - 0s 215us/step - loss: 0.1263 - mean_absolute_error: 0.2819 - val_loss: 0.1011 - val_mean_absolute_error: 0.2356

Epoch 00205: val_loss did not improve from 0.09295
Epoch 206/5000
102/102 [==============================] - ETA: 0s - loss: 0.1205 - mean_absolute_error: 0.271 - 0s 215us/step - loss: 0.1045 - mean_absolute_error: 0.2512 - val_loss: 0.1027 - val_mean_absolute_error: 0.2373

Epoch 00206: val_loss did not improve from 0.09295
Epoch 207/5000
102/102 [==============================] - ETA: 0s - loss: 0.1204 - mean_absolute_error: 0.267 - 0s 196us/step - loss: 0.1254 - mean_absolute_error: 0.2786 - val_loss: 0.1043 - val_mean_absolute_error: 0.2394

Epoch 00207: val_loss did not improve from 0.09295
Epoch 208/5000
102/102 [==============================] - ETA: 0s - loss: 0.0919 - mean_absolute_error: 0.236 - 0s 186us/step - loss: 0.1165 - mean_absolute_error: 0.2636 - val_loss: 0.1053 - val_mean_absolute_error: 0.2410

Epoch 00208: val_loss did not improve from 0.09295
Epoch 209/5000
102/102 [==============================] - ETA: 0s - loss: 0.0952 - mean_absolute_error: 0.237 - 0s 205us/step - loss: 0.1058 - mean_absolute_error: 0.2506 - val_loss: 0.1040 - val_mean_absolute_error: 0.2402

Epoch 00209: val_loss did not improve from 0.09295
Epoch 210/5000
102/102 [==============================] - ETA: 0s - loss: 0.1282 - mean_absolute_error: 0.283 - 0s 196us/step - loss: 0.1247 - mean_absolute_error: 0.2797 - val_loss: 0.1001 - val_mean_absolute_error: 0.2371

Epoch 00210: val_loss did not improve from 0.09295
Epoch 211/5000
102/102 [==============================] - ETA: 0s - loss: 0.1256 - mean_absolute_error: 0.274 - 0s 196us/step - loss: 0.1148 - mean_absolute_error: 0.2669 - val_loss: 0.0939 - val_mean_absolute_error: 0.2313

Epoch 00211: val_loss did not improve from 0.09295
Epoch 212/5000
102/102 [==============================] - ETA: 0s - loss: 0.1272 - mean_absolute_error: 0.292 - 0s 205us/step - loss: 0.1113 - mean_absolute_error: 0.2663 - val_loss: 0.0915 - val_mean_absolute_error: 0.2288

Epoch 00212: val_loss improved from 0.09295 to 0.09154, saving model to find_phone_dnn_model.h5
Epoch 213/5000
102/102 [==============================] - ETA: 0s - loss: 0.1251 - mean_absolute_error: 0.252 - 0s 176us/step - loss: 0.1176 - mean_absolute_error: 0.2630 - val_loss: 0.0924 - val_mean_absolute_error: 0.2301

Epoch 00213: val_loss did not improve from 0.09154
Epoch 214/5000
102/102 [==============================] - ETA: 0s - loss: 0.1131 - mean_absolute_error: 0.258 - 0s 554us/step - loss: 0.0979 - mean_absolute_error: 0.2412 - val_loss: 0.0942 - val_mean_absolute_error: 0.2314

Epoch 00214: val_loss did not improve from 0.09154
Epoch 215/5000
102/102 [==============================] - ETA: 0s - loss: 0.0970 - mean_absolute_error: 0.243 - 0s 293us/step - loss: 0.0977 - mean_absolute_error: 0.2436 - val_loss: 0.0977 - val_mean_absolute_error: 0.2349

Epoch 00215: val_loss did not improve from 0.09154
Epoch 216/5000
102/102 [==============================] - ETA: 0s - loss: 0.1230 - mean_absolute_error: 0.289 - 0s 205us/step - loss: 0.1291 - mean_absolute_error: 0.2884 - val_loss: 0.1006 - val_mean_absolute_error: 0.2371

Epoch 00216: val_loss did not improve from 0.09154
Epoch 217/5000
102/102 [==============================] - ETA: 0s - loss: 0.1282 - mean_absolute_error: 0.294 - 0s 205us/step - loss: 0.1082 - mean_absolute_error: 0.2622 - val_loss: 0.0996 - val_mean_absolute_error: 0.2366

Epoch 00217: val_loss did not improve from 0.09154
Epoch 218/5000
102/102 [==============================] - ETA: 0s - loss: 0.0997 - mean_absolute_error: 0.249 - 0s 235us/step - loss: 0.1111 - mean_absolute_error: 0.2615 - val_loss: 0.0990 - val_mean_absolute_error: 0.2363

Epoch 00218: val_loss did not improve from 0.09154
Epoch 219/5000
102/102 [==============================] - ETA: 0s - loss: 0.0956 - mean_absolute_error: 0.241 - 0s 205us/step - loss: 0.1137 - mean_absolute_error: 0.2670 - val_loss: 0.0981 - val_mean_absolute_error: 0.2351

Epoch 00219: val_loss did not improve from 0.09154
Epoch 220/5000
102/102 [==============================] - ETA: 0s - loss: 0.1390 - mean_absolute_error: 0.290 - 0s 205us/step - loss: 0.1097 - mean_absolute_error: 0.2562 - val_loss: 0.0970 - val_mean_absolute_error: 0.2335

Epoch 00220: val_loss did not improve from 0.09154
Epoch 221/5000
102/102 [==============================] - ETA: 0s - loss: 0.0685 - mean_absolute_error: 0.211 - 0s 196us/step - loss: 0.0970 - mean_absolute_error: 0.2468 - val_loss: 0.0974 - val_mean_absolute_error: 0.2334

Epoch 00221: val_loss did not improve from 0.09154
Epoch 222/5000
102/102 [==============================] - ETA: 0s - loss: 0.1150 - mean_absolute_error: 0.250 - 0s 205us/step - loss: 0.1124 - mean_absolute_error: 0.2589 - val_loss: 0.1006 - val_mean_absolute_error: 0.2352

Epoch 00222: val_loss did not improve from 0.09154
Epoch 223/5000
102/102 [==============================] - ETA: 0s - loss: 0.0783 - mean_absolute_error: 0.209 - 0s 244us/step - loss: 0.1034 - mean_absolute_error: 0.2459 - val_loss: 0.1037 - val_mean_absolute_error: 0.2373

Epoch 00223: val_loss did not improve from 0.09154
Epoch 224/5000
102/102 [==============================] - ETA: 0s - loss: 0.0973 - mean_absolute_error: 0.246 - 0s 215us/step - loss: 0.1095 - mean_absolute_error: 0.2630 - val_loss: 0.1034 - val_mean_absolute_error: 0.2371

Epoch 00224: val_loss did not improve from 0.09154
Epoch 225/5000
102/102 [==============================] - ETA: 0s - loss: 0.1824 - mean_absolute_error: 0.338 - 0s 196us/step - loss: 0.1549 - mean_absolute_error: 0.3087 - val_loss: 0.1027 - val_mean_absolute_error: 0.2369

Epoch 00225: val_loss did not improve from 0.09154
Epoch 226/5000
102/102 [==============================] - ETA: 0s - loss: 0.1072 - mean_absolute_error: 0.271 - 0s 244us/step - loss: 0.1121 - mean_absolute_error: 0.2632 - val_loss: 0.1015 - val_mean_absolute_error: 0.2364

Epoch 00226: val_loss did not improve from 0.09154
Epoch 227/5000
102/102 [==============================] - ETA: 0s - loss: 0.1330 - mean_absolute_error: 0.296 - 0s 235us/step - loss: 0.1165 - mean_absolute_error: 0.2712 - val_loss: 0.0999 - val_mean_absolute_error: 0.2351

Epoch 00227: val_loss did not improve from 0.09154
Epoch 228/5000
102/102 [==============================] - ETA: 0s - loss: 0.1424 - mean_absolute_error: 0.312 - 0s 215us/step - loss: 0.1172 - mean_absolute_error: 0.2756 - val_loss: 0.0981 - val_mean_absolute_error: 0.2336

Epoch 00228: val_loss did not improve from 0.09154
Epoch 229/5000
102/102 [==============================] - ETA: 0s - loss: 0.1085 - mean_absolute_error: 0.259 - 0s 225us/step - loss: 0.1080 - mean_absolute_error: 0.2551 - val_loss: 0.0973 - val_mean_absolute_error: 0.2325

Epoch 00229: val_loss did not improve from 0.09154
Epoch 230/5000
102/102 [==============================] - ETA: 0s - loss: 0.0944 - mean_absolute_error: 0.254 - 0s 205us/step - loss: 0.1145 - mean_absolute_error: 0.2705 - val_loss: 0.0968 - val_mean_absolute_error: 0.2315

Epoch 00230: val_loss did not improve from 0.09154
Epoch 231/5000
102/102 [==============================] - ETA: 0s - loss: 0.1207 - mean_absolute_error: 0.264 - 0s 215us/step - loss: 0.1225 - mean_absolute_error: 0.2742 - val_loss: 0.0953 - val_mean_absolute_error: 0.2291

Epoch 00231: val_loss did not improve from 0.09154
Epoch 232/5000
102/102 [==============================] - ETA: 0s - loss: 0.1352 - mean_absolute_error: 0.288 - 0s 235us/step - loss: 0.1061 - mean_absolute_error: 0.2499 - val_loss: 0.0947 - val_mean_absolute_error: 0.2280

Epoch 00232: val_loss did not improve from 0.09154
Epoch 233/5000
102/102 [==============================] - ETA: 0s - loss: 0.1083 - mean_absolute_error: 0.254 - 0s 254us/step - loss: 0.1082 - mean_absolute_error: 0.2610 - val_loss: 0.0950 - val_mean_absolute_error: 0.2278

Epoch 00233: val_loss did not improve from 0.09154
Epoch 234/5000
102/102 [==============================] - ETA: 0s - loss: 0.1179 - mean_absolute_error: 0.278 - 0s 215us/step - loss: 0.1120 - mean_absolute_error: 0.2608 - val_loss: 0.0943 - val_mean_absolute_error: 0.2269

Epoch 00234: val_loss did not improve from 0.09154
Epoch 235/5000
102/102 [==============================] - ETA: 0s - loss: 0.1134 - mean_absolute_error: 0.275 - 0s 186us/step - loss: 0.0872 - mean_absolute_error: 0.2388 - val_loss: 0.0941 - val_mean_absolute_error: 0.2267

Epoch 00235: val_loss did not improve from 0.09154
Epoch 236/5000
102/102 [==============================] - ETA: 0s - loss: 0.1386 - mean_absolute_error: 0.301 - 0s 205us/step - loss: 0.1154 - mean_absolute_error: 0.2680 - val_loss: 0.0949 - val_mean_absolute_error: 0.2279

Epoch 00236: val_loss did not improve from 0.09154
Epoch 237/5000
102/102 [==============================] - ETA: 0s - loss: 0.1051 - mean_absolute_error: 0.258 - 0s 186us/step - loss: 0.1062 - mean_absolute_error: 0.2581 - val_loss: 0.0944 - val_mean_absolute_error: 0.2288

Epoch 00237: val_loss did not improve from 0.09154
Epoch 238/5000
102/102 [==============================] - ETA: 0s - loss: 0.1125 - mean_absolute_error: 0.254 - 0s 244us/step - loss: 0.1054 - mean_absolute_error: 0.2542 - val_loss: 0.0958 - val_mean_absolute_error: 0.2312

Epoch 00238: val_loss did not improve from 0.09154
Epoch 239/5000
102/102 [==============================] - ETA: 0s - loss: 0.1051 - mean_absolute_error: 0.265 - 0s 244us/step - loss: 0.1077 - mean_absolute_error: 0.2575 - val_loss: 0.0985 - val_mean_absolute_error: 0.2338

Epoch 00239: val_loss did not improve from 0.09154
Epoch 240/5000
102/102 [==============================] - ETA: 0s - loss: 0.1285 - mean_absolute_error: 0.287 - 0s 215us/step - loss: 0.1149 - mean_absolute_error: 0.2630 - val_loss: 0.0969 - val_mean_absolute_error: 0.2319

Epoch 00240: val_loss did not improve from 0.09154
Epoch 241/5000
102/102 [==============================] - ETA: 0s - loss: 0.1029 - mean_absolute_error: 0.244 - 0s 215us/step - loss: 0.1179 - mean_absolute_error: 0.2754 - val_loss: 0.0953 - val_mean_absolute_error: 0.2297

Epoch 00241: val_loss did not improve from 0.09154
Epoch 242/5000
102/102 [==============================] - ETA: 0s - loss: 0.0982 - mean_absolute_error: 0.251 - 0s 225us/step - loss: 0.0958 - mean_absolute_error: 0.2408 - val_loss: 0.0925 - val_mean_absolute_error: 0.2265

Epoch 00242: val_loss did not improve from 0.09154
Epoch 243/5000
102/102 [==============================] - ETA: 0s - loss: 0.0811 - mean_absolute_error: 0.212 - 0s 254us/step - loss: 0.0995 - mean_absolute_error: 0.2418 - val_loss: 0.0904 - val_mean_absolute_error: 0.2252

Epoch 00243: val_loss improved from 0.09154 to 0.09035, saving model to find_phone_dnn_model.h5
Epoch 244/5000
102/102 [==============================] - ETA: 0s - loss: 0.0780 - mean_absolute_error: 0.206 - 0s 225us/step - loss: 0.1011 - mean_absolute_error: 0.2469 - val_loss: 0.0898 - val_mean_absolute_error: 0.2253

Epoch 00244: val_loss improved from 0.09035 to 0.08979, saving model to find_phone_dnn_model.h5
Epoch 245/5000
102/102 [==============================] - ETA: 0s - loss: 0.1146 - mean_absolute_error: 0.266 - 0s 186us/step - loss: 0.1050 - mean_absolute_error: 0.2559 - val_loss: 0.0911 - val_mean_absolute_error: 0.2270

Epoch 00245: val_loss did not improve from 0.08979
Epoch 246/5000
102/102 [==============================] - ETA: 0s - loss: 0.0868 - mean_absolute_error: 0.227 - 0s 205us/step - loss: 0.1128 - mean_absolute_error: 0.2644 - val_loss: 0.0935 - val_mean_absolute_error: 0.2302

Epoch 00246: val_loss did not improve from 0.08979
Epoch 247/5000
102/102 [==============================] - ETA: 0s - loss: 0.0966 - mean_absolute_error: 0.235 - 0s 215us/step - loss: 0.1164 - mean_absolute_error: 0.2666 - val_loss: 0.0951 - val_mean_absolute_error: 0.2324

Epoch 00247: val_loss did not improve from 0.08979
Epoch 248/5000
102/102 [==============================] - ETA: 0s - loss: 0.0691 - mean_absolute_error: 0.207 - 0s 196us/step - loss: 0.1001 - mean_absolute_error: 0.2482 - val_loss: 0.0954 - val_mean_absolute_error: 0.2338

Epoch 00248: val_loss did not improve from 0.08979
Epoch 249/5000
102/102 [==============================] - ETA: 0s - loss: 0.1152 - mean_absolute_error: 0.258 - 0s 225us/step - loss: 0.1095 - mean_absolute_error: 0.2576 - val_loss: 0.0948 - val_mean_absolute_error: 0.2343

Epoch 00249: val_loss did not improve from 0.08979
Epoch 250/5000
102/102 [==============================] - ETA: 0s - loss: 0.0888 - mean_absolute_error: 0.238 - 0s 196us/step - loss: 0.0934 - mean_absolute_error: 0.2417 - val_loss: 0.0943 - val_mean_absolute_error: 0.2333

Epoch 00250: val_loss did not improve from 0.08979
Epoch 251/5000
102/102 [==============================] - ETA: 0s - loss: 0.0979 - mean_absolute_error: 0.241 - 0s 205us/step - loss: 0.1042 - mean_absolute_error: 0.2503 - val_loss: 0.0939 - val_mean_absolute_error: 0.2318

Epoch 00251: val_loss did not improve from 0.08979
Epoch 252/5000
102/102 [==============================] - ETA: 0s - loss: 0.0885 - mean_absolute_error: 0.223 - 0s 196us/step - loss: 0.0962 - mean_absolute_error: 0.2414 - val_loss: 0.0945 - val_mean_absolute_error: 0.2321

Epoch 00252: val_loss did not improve from 0.08979
Epoch 253/5000
102/102 [==============================] - ETA: 0s - loss: 0.0909 - mean_absolute_error: 0.229 - 0s 205us/step - loss: 0.1066 - mean_absolute_error: 0.2489 - val_loss: 0.0949 - val_mean_absolute_error: 0.2320

Epoch 00253: val_loss did not improve from 0.08979
Epoch 254/5000
102/102 [==============================] - ETA: 0s - loss: 0.0859 - mean_absolute_error: 0.220 - 0s 205us/step - loss: 0.1099 - mean_absolute_error: 0.2571 - val_loss: 0.0934 - val_mean_absolute_error: 0.2305

Epoch 00254: val_loss did not improve from 0.08979
Epoch 255/5000
102/102 [==============================] - ETA: 0s - loss: 0.1230 - mean_absolute_error: 0.278 - 0s 196us/step - loss: 0.1133 - mean_absolute_error: 0.2660 - val_loss: 0.0904 - val_mean_absolute_error: 0.2276

Epoch 00255: val_loss did not improve from 0.08979
Epoch 256/5000
102/102 [==============================] - ETA: 0s - loss: 0.1006 - mean_absolute_error: 0.250 - 0s 215us/step - loss: 0.0911 - mean_absolute_error: 0.2331 - val_loss: 0.0865 - val_mean_absolute_error: 0.2231

Epoch 00256: val_loss improved from 0.08979 to 0.08648, saving model to find_phone_dnn_model.h5
Epoch 257/5000
102/102 [==============================] - ETA: 0s - loss: 0.0941 - mean_absolute_error: 0.231 - 0s 196us/step - loss: 0.1077 - mean_absolute_error: 0.2492 - val_loss: 0.0850 - val_mean_absolute_error: 0.2210

Epoch 00257: val_loss improved from 0.08648 to 0.08505, saving model to find_phone_dnn_model.h5
Epoch 258/5000
102/102 [==============================] - ETA: 0s - loss: 0.1129 - mean_absolute_error: 0.265 - 0s 284us/step - loss: 0.1090 - mean_absolute_error: 0.2583 - val_loss: 0.0849 - val_mean_absolute_error: 0.2202

Epoch 00258: val_loss improved from 0.08505 to 0.08486, saving model to find_phone_dnn_model.h5
Epoch 259/5000
102/102 [==============================] - ETA: 0s - loss: 0.0996 - mean_absolute_error: 0.234 - 0s 196us/step - loss: 0.1040 - mean_absolute_error: 0.2459 - val_loss: 0.0861 - val_mean_absolute_error: 0.2219

Epoch 00259: val_loss did not improve from 0.08486
Epoch 260/5000
102/102 [==============================] - ETA: 0s - loss: 0.1149 - mean_absolute_error: 0.265 - 0s 264us/step - loss: 0.1192 - mean_absolute_error: 0.2636 - val_loss: 0.0863 - val_mean_absolute_error: 0.2231

Epoch 00260: val_loss did not improve from 0.08486
Epoch 261/5000
102/102 [==============================] - ETA: 0s - loss: 0.1249 - mean_absolute_error: 0.278 - 0s 225us/step - loss: 0.0957 - mean_absolute_error: 0.2371 - val_loss: 0.0894 - val_mean_absolute_error: 0.2271

Epoch 00261: val_loss did not improve from 0.08486
Epoch 262/5000
102/102 [==============================] - ETA: 0s - loss: 0.0858 - mean_absolute_error: 0.220 - 0s 235us/step - loss: 0.0903 - mean_absolute_error: 0.2286 - val_loss: 0.0929 - val_mean_absolute_error: 0.2307

Epoch 00262: val_loss did not improve from 0.08486
Epoch 263/5000
102/102 [==============================] - ETA: 0s - loss: 0.0809 - mean_absolute_error: 0.237 - 0s 244us/step - loss: 0.0947 - mean_absolute_error: 0.2499 - val_loss: 0.0958 - val_mean_absolute_error: 0.2341

Epoch 00263: val_loss did not improve from 0.08486
Epoch 264/5000
102/102 [==============================] - ETA: 0s - loss: 0.1016 - mean_absolute_error: 0.244 - 0s 215us/step - loss: 0.1090 - mean_absolute_error: 0.2555 - val_loss: 0.0983 - val_mean_absolute_error: 0.2381

Epoch 00264: val_loss did not improve from 0.08486
Epoch 265/5000
102/102 [==============================] - ETA: 0s - loss: 0.0803 - mean_absolute_error: 0.219 - 0s 225us/step - loss: 0.1026 - mean_absolute_error: 0.2468 - val_loss: 0.1001 - val_mean_absolute_error: 0.2407

Epoch 00265: val_loss did not improve from 0.08486
Epoch 266/5000
102/102 [==============================] - ETA: 0s - loss: 0.1186 - mean_absolute_error: 0.262 - 0s 235us/step - loss: 0.1083 - mean_absolute_error: 0.2522 - val_loss: 0.0998 - val_mean_absolute_error: 0.2410

Epoch 00266: val_loss did not improve from 0.08486
Epoch 267/5000
102/102 [==============================] - ETA: 0s - loss: 0.1012 - mean_absolute_error: 0.237 - 0s 196us/step - loss: 0.1015 - mean_absolute_error: 0.2504 - val_loss: 0.0990 - val_mean_absolute_error: 0.2407

Epoch 00267: val_loss did not improve from 0.08486
Epoch 268/5000
102/102 [==============================] - ETA: 0s - loss: 0.1144 - mean_absolute_error: 0.259 - 0s 166us/step - loss: 0.1203 - mean_absolute_error: 0.2694 - val_loss: 0.0993 - val_mean_absolute_error: 0.2403

Epoch 00268: val_loss did not improve from 0.08486
Epoch 269/5000
102/102 [==============================] - ETA: 0s - loss: 0.1043 - mean_absolute_error: 0.244 - 0s 205us/step - loss: 0.0935 - mean_absolute_error: 0.2348 - val_loss: 0.0991 - val_mean_absolute_error: 0.2385

Epoch 00269: val_loss did not improve from 0.08486
Epoch 270/5000
102/102 [==============================] - ETA: 0s - loss: 0.0814 - mean_absolute_error: 0.212 - 0s 205us/step - loss: 0.0982 - mean_absolute_error: 0.2390 - val_loss: 0.0991 - val_mean_absolute_error: 0.2374

Epoch 00270: val_loss did not improve from 0.08486
Epoch 271/5000
102/102 [==============================] - ETA: 0s - loss: 0.0938 - mean_absolute_error: 0.237 - 0s 205us/step - loss: 0.1042 - mean_absolute_error: 0.2528 - val_loss: 0.0995 - val_mean_absolute_error: 0.2367

Epoch 00271: val_loss did not improve from 0.08486
Epoch 272/5000
102/102 [==============================] - ETA: 0s - loss: 0.1278 - mean_absolute_error: 0.273 - 0s 186us/step - loss: 0.0997 - mean_absolute_error: 0.2450 - val_loss: 0.1000 - val_mean_absolute_error: 0.2363

Epoch 00272: val_loss did not improve from 0.08486
Epoch 273/5000
102/102 [==============================] - ETA: 0s - loss: 0.1461 - mean_absolute_error: 0.300 - 0s 235us/step - loss: 0.1209 - mean_absolute_error: 0.2658 - val_loss: 0.0995 - val_mean_absolute_error: 0.2360

Epoch 00273: val_loss did not improve from 0.08486
Epoch 274/5000
102/102 [==============================] - ETA: 0s - loss: 0.0790 - mean_absolute_error: 0.210 - 0s 176us/step - loss: 0.1003 - mean_absolute_error: 0.2380 - val_loss: 0.1002 - val_mean_absolute_error: 0.2371

Epoch 00274: val_loss did not improve from 0.08486
Epoch 275/5000
102/102 [==============================] - ETA: 0s - loss: 0.0874 - mean_absolute_error: 0.229 - 0s 176us/step - loss: 0.1024 - mean_absolute_error: 0.2529 - val_loss: 0.0993 - val_mean_absolute_error: 0.2362

Epoch 00275: val_loss did not improve from 0.08486
Epoch 276/5000
102/102 [==============================] - ETA: 0s - loss: 0.1029 - mean_absolute_error: 0.241 - 0s 206us/step - loss: 0.0967 - mean_absolute_error: 0.2421 - val_loss: 0.0971 - val_mean_absolute_error: 0.2350

Epoch 00276: val_loss did not improve from 0.08486
Epoch 277/5000
102/102 [==============================] - ETA: 0s - loss: 0.1155 - mean_absolute_error: 0.271 - 0s 205us/step - loss: 0.1206 - mean_absolute_error: 0.2715 - val_loss: 0.0959 - val_mean_absolute_error: 0.2351

Epoch 00277: val_loss did not improve from 0.08486
Epoch 278/5000
102/102 [==============================] - ETA: 0s - loss: 0.1040 - mean_absolute_error: 0.251 - 0s 225us/step - loss: 0.1070 - mean_absolute_error: 0.2530 - val_loss: 0.0968 - val_mean_absolute_error: 0.2364

Epoch 00278: val_loss did not improve from 0.08486
Epoch 279/5000
102/102 [==============================] - ETA: 0s - loss: 0.1099 - mean_absolute_error: 0.264 - 0s 244us/step - loss: 0.0980 - mean_absolute_error: 0.2465 - val_loss: 0.0976 - val_mean_absolute_error: 0.2376

Epoch 00279: val_loss did not improve from 0.08486
Epoch 280/5000
102/102 [==============================] - ETA: 0s - loss: 0.0900 - mean_absolute_error: 0.229 - 0s 205us/step - loss: 0.0994 - mean_absolute_error: 0.2432 - val_loss: 0.0973 - val_mean_absolute_error: 0.2380

Epoch 00280: val_loss did not improve from 0.08486
Epoch 281/5000
102/102 [==============================] - ETA: 0s - loss: 0.1178 - mean_absolute_error: 0.269 - 0s 215us/step - loss: 0.1157 - mean_absolute_error: 0.2667 - val_loss: 0.0968 - val_mean_absolute_error: 0.2380

Epoch 00281: val_loss did not improve from 0.08486
Epoch 282/5000
102/102 [==============================] - ETA: 0s - loss: 0.1007 - mean_absolute_error: 0.250 - 0s 215us/step - loss: 0.0953 - mean_absolute_error: 0.2369 - val_loss: 0.0955 - val_mean_absolute_error: 0.2375

Epoch 00282: val_loss did not improve from 0.08486
Epoch 283/5000
102/102 [==============================] - ETA: 0s - loss: 0.0994 - mean_absolute_error: 0.246 - 0s 196us/step - loss: 0.1048 - mean_absolute_error: 0.2545 - val_loss: 0.0950 - val_mean_absolute_error: 0.2370

Epoch 00283: val_loss did not improve from 0.08486
Epoch 284/5000
102/102 [==============================] - ETA: 0s - loss: 0.0836 - mean_absolute_error: 0.228 - 0s 225us/step - loss: 0.0922 - mean_absolute_error: 0.2359 - val_loss: 0.0935 - val_mean_absolute_error: 0.2351

Epoch 00284: val_loss did not improve from 0.08486
Epoch 285/5000
102/102 [==============================] - ETA: 0s - loss: 0.1089 - mean_absolute_error: 0.260 - 0s 205us/step - loss: 0.1192 - mean_absolute_error: 0.2690 - val_loss: 0.0917 - val_mean_absolute_error: 0.2332

Epoch 00285: val_loss did not improve from 0.08486
Epoch 286/5000
102/102 [==============================] - ETA: 0s - loss: 0.1175 - mean_absolute_error: 0.248 - 0s 215us/step - loss: 0.0983 - mean_absolute_error: 0.2326 - val_loss: 0.0904 - val_mean_absolute_error: 0.2314

Epoch 00286: val_loss did not improve from 0.08486
Epoch 287/5000
102/102 [==============================] - ETA: 0s - loss: 0.1595 - mean_absolute_error: 0.318 - 0s 215us/step - loss: 0.1171 - mean_absolute_error: 0.2644 - val_loss: 0.0907 - val_mean_absolute_error: 0.2305

Epoch 00287: val_loss did not improve from 0.08486
Epoch 288/5000
102/102 [==============================] - ETA: 0s - loss: 0.1123 - mean_absolute_error: 0.267 - 0s 196us/step - loss: 0.0894 - mean_absolute_error: 0.2348 - val_loss: 0.0925 - val_mean_absolute_error: 0.2308

Epoch 00288: val_loss did not improve from 0.08486
Epoch 289/5000
102/102 [==============================] - ETA: 0s - loss: 0.1171 - mean_absolute_error: 0.257 - 0s 225us/step - loss: 0.0950 - mean_absolute_error: 0.2344 - val_loss: 0.0928 - val_mean_absolute_error: 0.2302

Epoch 00289: val_loss did not improve from 0.08486
Epoch 290/5000
102/102 [==============================] - ETA: 0s - loss: 0.1181 - mean_absolute_error: 0.258 - 0s 215us/step - loss: 0.1168 - mean_absolute_error: 0.2706 - val_loss: 0.0931 - val_mean_absolute_error: 0.2302

Epoch 00290: val_loss did not improve from 0.08486
Epoch 291/5000
102/102 [==============================] - ETA: 0s - loss: 0.0949 - mean_absolute_error: 0.229 - 0s 205us/step - loss: 0.0944 - mean_absolute_error: 0.2346 - val_loss: 0.0925 - val_mean_absolute_error: 0.2299

Epoch 00291: val_loss did not improve from 0.08486
Epoch 292/5000
102/102 [==============================] - ETA: 0s - loss: 0.1049 - mean_absolute_error: 0.258 - 0s 225us/step - loss: 0.1074 - mean_absolute_error: 0.2595 - val_loss: 0.0912 - val_mean_absolute_error: 0.2292

Epoch 00292: val_loss did not improve from 0.08486
Epoch 293/5000
102/102 [==============================] - ETA: 0s - loss: 0.0790 - mean_absolute_error: 0.223 - 0s 205us/step - loss: 0.0918 - mean_absolute_error: 0.2379 - val_loss: 0.0899 - val_mean_absolute_error: 0.2282

Epoch 00293: val_loss did not improve from 0.08486
Epoch 294/5000
102/102 [==============================] - ETA: 0s - loss: 0.0643 - mean_absolute_error: 0.212 - 0s 196us/step - loss: 0.0843 - mean_absolute_error: 0.2291 - val_loss: 0.0910 - val_mean_absolute_error: 0.2299

Epoch 00294: val_loss did not improve from 0.08486
Epoch 295/5000
102/102 [==============================] - ETA: 0s - loss: 0.0887 - mean_absolute_error: 0.230 - 0s 196us/step - loss: 0.1057 - mean_absolute_error: 0.2533 - val_loss: 0.0925 - val_mean_absolute_error: 0.2319

Epoch 00295: val_loss did not improve from 0.08486
Epoch 296/5000
102/102 [==============================] - ETA: 0s - loss: 0.0911 - mean_absolute_error: 0.230 - 0s 215us/step - loss: 0.0957 - mean_absolute_error: 0.2380 - val_loss: 0.0941 - val_mean_absolute_error: 0.2340

Epoch 00296: val_loss did not improve from 0.08486
Epoch 297/5000
102/102 [==============================] - ETA: 0s - loss: 0.0907 - mean_absolute_error: 0.216 - 0s 205us/step - loss: 0.0904 - mean_absolute_error: 0.2266 - val_loss: 0.0956 - val_mean_absolute_error: 0.2358

Epoch 00297: val_loss did not improve from 0.08486
Epoch 298/5000
102/102 [==============================] - ETA: 0s - loss: 0.1061 - mean_absolute_error: 0.266 - 0s 205us/step - loss: 0.1011 - mean_absolute_error: 0.2491 - val_loss: 0.0958 - val_mean_absolute_error: 0.2370

Epoch 00298: val_loss did not improve from 0.08486
Epoch 299/5000
102/102 [==============================] - ETA: 0s - loss: 0.0780 - mean_absolute_error: 0.220 - 0s 205us/step - loss: 0.0940 - mean_absolute_error: 0.2313 - val_loss: 0.0964 - val_mean_absolute_error: 0.2380

Epoch 00299: val_loss did not improve from 0.08486
Epoch 300/5000
102/102 [==============================] - ETA: 0s - loss: 0.1132 - mean_absolute_error: 0.263 - 0s 235us/step - loss: 0.1136 - mean_absolute_error: 0.2631 - val_loss: 0.0956 - val_mean_absolute_error: 0.2377

Epoch 00300: val_loss did not improve from 0.08486
Epoch 301/5000
102/102 [==============================] - ETA: 0s - loss: 0.0822 - mean_absolute_error: 0.220 - 0s 196us/step - loss: 0.0899 - mean_absolute_error: 0.2325 - val_loss: 0.0930 - val_mean_absolute_error: 0.2353

Epoch 00301: val_loss did not improve from 0.08486
Epoch 302/5000
102/102 [==============================] - ETA: 0s - loss: 0.1071 - mean_absolute_error: 0.246 - 0s 235us/step - loss: 0.0845 - mean_absolute_error: 0.2187 - val_loss: 0.0906 - val_mean_absolute_error: 0.2325

Epoch 00302: val_loss did not improve from 0.08486
Epoch 303/5000
102/102 [==============================] - ETA: 0s - loss: 0.1175 - mean_absolute_error: 0.266 - 0s 205us/step - loss: 0.0951 - mean_absolute_error: 0.2344 - val_loss: 0.0892 - val_mean_absolute_error: 0.2306

Epoch 00303: val_loss did not improve from 0.08486
Epoch 304/5000
102/102 [==============================] - ETA: 0s - loss: 0.0936 - mean_absolute_error: 0.220 - 0s 205us/step - loss: 0.0874 - mean_absolute_error: 0.2243 - val_loss: 0.0895 - val_mean_absolute_error: 0.2298

Epoch 00304: val_loss did not improve from 0.08486
Epoch 305/5000
102/102 [==============================] - ETA: 0s - loss: 0.0886 - mean_absolute_error: 0.230 - 0s 205us/step - loss: 0.0819 - mean_absolute_error: 0.2142 - val_loss: 0.0914 - val_mean_absolute_error: 0.2301

Epoch 00305: val_loss did not improve from 0.08486
Epoch 306/5000
102/102 [==============================] - ETA: 0s - loss: 0.1039 - mean_absolute_error: 0.248 - 0s 176us/step - loss: 0.0960 - mean_absolute_error: 0.2372 - val_loss: 0.0919 - val_mean_absolute_error: 0.2294

Epoch 00306: val_loss did not improve from 0.08486
Epoch 307/5000
102/102 [==============================] - ETA: 0s - loss: 0.0746 - mean_absolute_error: 0.207 - 0s 205us/step - loss: 0.0892 - mean_absolute_error: 0.2196 - val_loss: 0.0902 - val_mean_absolute_error: 0.2273

Epoch 00307: val_loss did not improve from 0.08486
Epoch 308/5000
102/102 [==============================] - ETA: 0s - loss: 0.1091 - mean_absolute_error: 0.252 - 0s 196us/step - loss: 0.0970 - mean_absolute_error: 0.2445 - val_loss: 0.0882 - val_mean_absolute_error: 0.2253

Epoch 00308: val_loss did not improve from 0.08486
Epoch 309/5000
102/102 [==============================] - ETA: 0s - loss: 0.1127 - mean_absolute_error: 0.254 - 0s 215us/step - loss: 0.0957 - mean_absolute_error: 0.2338 - val_loss: 0.0884 - val_mean_absolute_error: 0.2259

Epoch 00309: val_loss did not improve from 0.08486
Epoch 310/5000
102/102 [==============================] - ETA: 0s - loss: 0.1178 - mean_absolute_error: 0.277 - 0s 215us/step - loss: 0.0919 - mean_absolute_error: 0.2347 - val_loss: 0.0909 - val_mean_absolute_error: 0.2287

Epoch 00310: val_loss did not improve from 0.08486
Epoch 311/5000
102/102 [==============================] - ETA: 0s - loss: 0.0733 - mean_absolute_error: 0.214 - 0s 205us/step - loss: 0.0930 - mean_absolute_error: 0.2391 - val_loss: 0.0911 - val_mean_absolute_error: 0.2294

Epoch 00311: val_loss did not improve from 0.08486
Epoch 312/5000
102/102 [==============================] - ETA: 0s - loss: 0.1134 - mean_absolute_error: 0.265 - 0s 196us/step - loss: 0.1077 - mean_absolute_error: 0.2512 - val_loss: 0.0904 - val_mean_absolute_error: 0.2294

Epoch 00312: val_loss did not improve from 0.08486
Epoch 313/5000
102/102 [==============================] - ETA: 0s - loss: 0.0974 - mean_absolute_error: 0.241 - 0s 186us/step - loss: 0.0774 - mean_absolute_error: 0.2103 - val_loss: 0.0901 - val_mean_absolute_error: 0.2293

Epoch 00313: val_loss did not improve from 0.08486
Epoch 314/5000
102/102 [==============================] - ETA: 0s - loss: 0.0747 - mean_absolute_error: 0.214 - 0s 205us/step - loss: 0.0825 - mean_absolute_error: 0.2216 - val_loss: 0.0902 - val_mean_absolute_error: 0.2295

Epoch 00314: val_loss did not improve from 0.08486
Epoch 315/5000
102/102 [==============================] - ETA: 0s - loss: 0.0809 - mean_absolute_error: 0.215 - 0s 205us/step - loss: 0.0819 - mean_absolute_error: 0.2213 - val_loss: 0.0897 - val_mean_absolute_error: 0.2289

Epoch 00315: val_loss did not improve from 0.08486
Epoch 316/5000
102/102 [==============================] - ETA: 0s - loss: 0.1015 - mean_absolute_error: 0.243 - 0s 215us/step - loss: 0.0956 - mean_absolute_error: 0.2466 - val_loss: 0.0900 - val_mean_absolute_error: 0.2290

Epoch 00316: val_loss did not improve from 0.08486
Epoch 317/5000
102/102 [==============================] - ETA: 0s - loss: 0.0823 - mean_absolute_error: 0.227 - 0s 186us/step - loss: 0.0852 - mean_absolute_error: 0.2269 - val_loss: 0.0906 - val_mean_absolute_error: 0.2285

Epoch 00317: val_loss did not improve from 0.08486
Epoch 318/5000
102/102 [==============================] - ETA: 0s - loss: 0.0880 - mean_absolute_error: 0.226 - 0s 215us/step - loss: 0.0890 - mean_absolute_error: 0.2331 - val_loss: 0.0914 - val_mean_absolute_error: 0.2285

Epoch 00318: val_loss did not improve from 0.08486
Epoch 319/5000
102/102 [==============================] - ETA: 0s - loss: 0.1118 - mean_absolute_error: 0.262 - 0s 215us/step - loss: 0.1034 - mean_absolute_error: 0.2542 - val_loss: 0.0931 - val_mean_absolute_error: 0.2303

Epoch 00319: val_loss did not improve from 0.08486
Epoch 320/5000
102/102 [==============================] - ETA: 0s - loss: 0.0752 - mean_absolute_error: 0.198 - 0s 205us/step - loss: 0.1024 - mean_absolute_error: 0.2412 - val_loss: 0.0929 - val_mean_absolute_error: 0.2303

Epoch 00320: val_loss did not improve from 0.08486
Epoch 321/5000
102/102 [==============================] - ETA: 0s - loss: 0.0717 - mean_absolute_error: 0.197 - 0s 196us/step - loss: 0.0865 - mean_absolute_error: 0.2243 - val_loss: 0.0915 - val_mean_absolute_error: 0.2292

Epoch 00321: val_loss did not improve from 0.08486
Epoch 322/5000
102/102 [==============================] - ETA: 0s - loss: 0.0875 - mean_absolute_error: 0.211 - 0s 205us/step - loss: 0.0781 - mean_absolute_error: 0.2098 - val_loss: 0.0900 - val_mean_absolute_error: 0.2279

Epoch 00322: val_loss did not improve from 0.08486
Epoch 323/5000
102/102 [==============================] - ETA: 0s - loss: 0.0794 - mean_absolute_error: 0.220 - 0s 215us/step - loss: 0.0883 - mean_absolute_error: 0.2336 - val_loss: 0.0894 - val_mean_absolute_error: 0.2269

Epoch 00323: val_loss did not improve from 0.08486
Epoch 324/5000
102/102 [==============================] - ETA: 0s - loss: 0.1084 - mean_absolute_error: 0.248 - 0s 205us/step - loss: 0.1024 - mean_absolute_error: 0.2455 - val_loss: 0.0889 - val_mean_absolute_error: 0.2261

Epoch 00324: val_loss did not improve from 0.08486
Epoch 325/5000
102/102 [==============================] - ETA: 0s - loss: 0.1003 - mean_absolute_error: 0.233 - 0s 205us/step - loss: 0.0985 - mean_absolute_error: 0.2406 - val_loss: 0.0887 - val_mean_absolute_error: 0.2258

Epoch 00325: val_loss did not improve from 0.08486
Epoch 326/5000
102/102 [==============================] - ETA: 0s - loss: 0.0909 - mean_absolute_error: 0.233 - 0s 225us/step - loss: 0.0778 - mean_absolute_error: 0.2144 - val_loss: 0.0901 - val_mean_absolute_error: 0.2267

Epoch 00326: val_loss did not improve from 0.08486
Epoch 327/5000
102/102 [==============================] - ETA: 0s - loss: 0.0783 - mean_absolute_error: 0.216 - 0s 196us/step - loss: 0.0795 - mean_absolute_error: 0.2132 - val_loss: 0.0908 - val_mean_absolute_error: 0.2274

Epoch 00327: val_loss did not improve from 0.08486
Epoch 328/5000
102/102 [==============================] - ETA: 0s - loss: 0.0831 - mean_absolute_error: 0.217 - 0s 205us/step - loss: 0.0887 - mean_absolute_error: 0.2257 - val_loss: 0.0914 - val_mean_absolute_error: 0.2281

Epoch 00328: val_loss did not improve from 0.08486
Epoch 329/5000
102/102 [==============================] - ETA: 0s - loss: 0.0959 - mean_absolute_error: 0.235 - 0s 186us/step - loss: 0.0969 - mean_absolute_error: 0.2449 - val_loss: 0.0921 - val_mean_absolute_error: 0.2291

Epoch 00329: val_loss did not improve from 0.08486
Epoch 330/5000
102/102 [==============================] - ETA: 0s - loss: 0.0737 - mean_absolute_error: 0.209 - 0s 205us/step - loss: 0.0928 - mean_absolute_error: 0.2350 - val_loss: 0.0914 - val_mean_absolute_error: 0.2292

Epoch 00330: val_loss did not improve from 0.08486
Epoch 331/5000
102/102 [==============================] - ETA: 0s - loss: 0.0901 - mean_absolute_error: 0.236 - 0s 235us/step - loss: 0.0970 - mean_absolute_error: 0.2378 - val_loss: 0.0895 - val_mean_absolute_error: 0.2285

Epoch 00331: val_loss did not improve from 0.08486
Epoch 332/5000
102/102 [==============================] - ETA: 0s - loss: 0.0835 - mean_absolute_error: 0.216 - 0s 215us/step - loss: 0.1033 - mean_absolute_error: 0.2427 - val_loss: 0.0868 - val_mean_absolute_error: 0.2268

Epoch 00332: val_loss did not improve from 0.08486
Epoch 333/5000
102/102 [==============================] - ETA: 0s - loss: 0.0775 - mean_absolute_error: 0.219 - 0s 215us/step - loss: 0.0817 - mean_absolute_error: 0.2201 - val_loss: 0.0850 - val_mean_absolute_error: 0.2250

Epoch 00333: val_loss did not improve from 0.08486
Epoch 334/5000
102/102 [==============================] - ETA: 0s - loss: 0.0679 - mean_absolute_error: 0.204 - 0s 205us/step - loss: 0.0745 - mean_absolute_error: 0.2115 - val_loss: 0.0863 - val_mean_absolute_error: 0.2260

Epoch 00334: val_loss did not improve from 0.08486
Epoch 335/5000
102/102 [==============================] - ETA: 0s - loss: 0.0778 - mean_absolute_error: 0.223 - 0s 215us/step - loss: 0.0953 - mean_absolute_error: 0.2383 - val_loss: 0.0883 - val_mean_absolute_error: 0.2277

Epoch 00335: val_loss did not improve from 0.08486
Epoch 336/5000
102/102 [==============================] - ETA: 0s - loss: 0.0928 - mean_absolute_error: 0.233 - 0s 195us/step - loss: 0.0794 - mean_absolute_error: 0.2180 - val_loss: 0.0906 - val_mean_absolute_error: 0.2297

Epoch 00336: val_loss did not improve from 0.08486
Epoch 337/5000
102/102 [==============================] - ETA: 0s - loss: 0.0785 - mean_absolute_error: 0.212 - 0s 215us/step - loss: 0.0876 - mean_absolute_error: 0.2199 - val_loss: 0.0914 - val_mean_absolute_error: 0.2305

Epoch 00337: val_loss did not improve from 0.08486
Epoch 338/5000
102/102 [==============================] - ETA: 0s - loss: 0.1006 - mean_absolute_error: 0.236 - 0s 215us/step - loss: 0.0896 - mean_absolute_error: 0.2275 - val_loss: 0.0920 - val_mean_absolute_error: 0.2311

Epoch 00338: val_loss did not improve from 0.08486
Epoch 339/5000
102/102 [==============================] - ETA: 0s - loss: 0.0979 - mean_absolute_error: 0.236 - 0s 205us/step - loss: 0.0780 - mean_absolute_error: 0.2145 - val_loss: 0.0933 - val_mean_absolute_error: 0.2328

Epoch 00339: val_loss did not improve from 0.08486
Epoch 340/5000
102/102 [==============================] - ETA: 0s - loss: 0.0878 - mean_absolute_error: 0.229 - 0s 195us/step - loss: 0.0969 - mean_absolute_error: 0.2370 - val_loss: 0.0929 - val_mean_absolute_error: 0.2328

Epoch 00340: val_loss did not improve from 0.08486
Epoch 341/5000
102/102 [==============================] - ETA: 0s - loss: 0.0825 - mean_absolute_error: 0.224 - 0s 196us/step - loss: 0.0796 - mean_absolute_error: 0.2146 - val_loss: 0.0918 - val_mean_absolute_error: 0.2323

Epoch 00341: val_loss did not improve from 0.08486
Epoch 342/5000
102/102 [==============================] - ETA: 0s - loss: 0.1210 - mean_absolute_error: 0.277 - 0s 215us/step - loss: 0.1080 - mean_absolute_error: 0.2554 - val_loss: 0.0899 - val_mean_absolute_error: 0.2302

Epoch 00342: val_loss did not improve from 0.08486
Epoch 343/5000
102/102 [==============================] - ETA: 0s - loss: 0.0821 - mean_absolute_error: 0.215 - 0s 225us/step - loss: 0.1061 - mean_absolute_error: 0.2498 - val_loss: 0.0885 - val_mean_absolute_error: 0.2279

Epoch 00343: val_loss did not improve from 0.08486
Epoch 344/5000
102/102 [==============================] - ETA: 0s - loss: 0.1262 - mean_absolute_error: 0.268 - 0s 196us/step - loss: 0.0913 - mean_absolute_error: 0.2302 - val_loss: 0.0880 - val_mean_absolute_error: 0.2267

Epoch 00344: val_loss did not improve from 0.08486
Epoch 345/5000
102/102 [==============================] - ETA: 0s - loss: 0.1046 - mean_absolute_error: 0.249 - 0s 196us/step - loss: 0.0950 - mean_absolute_error: 0.2426 - val_loss: 0.0897 - val_mean_absolute_error: 0.2285

Epoch 00345: val_loss did not improve from 0.08486
Epoch 346/5000
102/102 [==============================] - ETA: 0s - loss: 0.0784 - mean_absolute_error: 0.221 - 0s 196us/step - loss: 0.0830 - mean_absolute_error: 0.2228 - val_loss: 0.0927 - val_mean_absolute_error: 0.2308

Epoch 00346: val_loss did not improve from 0.08486
Epoch 347/5000
102/102 [==============================] - ETA: 0s - loss: 0.0998 - mean_absolute_error: 0.242 - 0s 176us/step - loss: 0.0940 - mean_absolute_error: 0.2340 - val_loss: 0.0944 - val_mean_absolute_error: 0.2315

Epoch 00347: val_loss did not improve from 0.08486
Epoch 348/5000
102/102 [==============================] - ETA: 0s - loss: 0.0855 - mean_absolute_error: 0.212 - 0s 186us/step - loss: 0.0914 - mean_absolute_error: 0.2254 - val_loss: 0.0940 - val_mean_absolute_error: 0.2303

Epoch 00348: val_loss did not improve from 0.08486
Epoch 349/5000
102/102 [==============================] - ETA: 0s - loss: 0.0883 - mean_absolute_error: 0.224 - 0s 205us/step - loss: 0.0855 - mean_absolute_error: 0.2237 - val_loss: 0.0937 - val_mean_absolute_error: 0.2297

Epoch 00349: val_loss did not improve from 0.08486
Epoch 350/5000
102/102 [==============================] - ETA: 0s - loss: 0.0725 - mean_absolute_error: 0.204 - 0s 225us/step - loss: 0.0841 - mean_absolute_error: 0.2133 - val_loss: 0.0947 - val_mean_absolute_error: 0.2307

Epoch 00350: val_loss did not improve from 0.08486
Epoch 351/5000
102/102 [==============================] - ETA: 0s - loss: 0.0821 - mean_absolute_error: 0.213 - 0s 205us/step - loss: 0.0812 - mean_absolute_error: 0.2210 - val_loss: 0.0941 - val_mean_absolute_error: 0.2316

Epoch 00351: val_loss did not improve from 0.08486
Epoch 352/5000
102/102 [==============================] - ETA: 0s - loss: 0.1161 - mean_absolute_error: 0.280 - 0s 186us/step - loss: 0.0930 - mean_absolute_error: 0.2404 - val_loss: 0.0915 - val_mean_absolute_error: 0.2308

Epoch 00352: val_loss did not improve from 0.08486
Epoch 353/5000
102/102 [==============================] - ETA: 0s - loss: 0.0744 - mean_absolute_error: 0.189 - 0s 196us/step - loss: 0.0947 - mean_absolute_error: 0.2338 - val_loss: 0.0893 - val_mean_absolute_error: 0.2291

Epoch 00353: val_loss did not improve from 0.08486
Epoch 354/5000
102/102 [==============================] - ETA: 0s - loss: 0.0863 - mean_absolute_error: 0.239 - 0s 205us/step - loss: 0.0831 - mean_absolute_error: 0.2294 - val_loss: 0.0883 - val_mean_absolute_error: 0.2281

Epoch 00354: val_loss did not improve from 0.08486
Epoch 355/5000
102/102 [==============================] - ETA: 0s - loss: 0.1356 - mean_absolute_error: 0.294 - 0s 205us/step - loss: 0.0952 - mean_absolute_error: 0.2425 - val_loss: 0.0898 - val_mean_absolute_error: 0.2288

Epoch 00355: val_loss did not improve from 0.08486
Epoch 356/5000
102/102 [==============================] - ETA: 0s - loss: 0.0963 - mean_absolute_error: 0.237 - 0s 225us/step - loss: 0.0974 - mean_absolute_error: 0.2401 - val_loss: 0.0930 - val_mean_absolute_error: 0.2316

Epoch 00356: val_loss did not improve from 0.08486
Epoch 357/5000
102/102 [==============================] - ETA: 0s - loss: 0.0845 - mean_absolute_error: 0.230 - 0s 205us/step - loss: 0.0811 - mean_absolute_error: 0.2228 - val_loss: 0.0942 - val_mean_absolute_error: 0.2330

Epoch 00357: val_loss did not improve from 0.08486
Epoch 358/5000
102/102 [==============================] - ETA: 0s - loss: 0.1079 - mean_absolute_error: 0.264 - 0s 215us/step - loss: 0.0936 - mean_absolute_error: 0.2396 - val_loss: 0.0938 - val_mean_absolute_error: 0.2327

Epoch 00358: val_loss did not improve from 0.08486
13/13 [==============================] - 0s 153us/step
Loss: 0.06686072796583176    MAE: 0.19180764257907867
In [13]:
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
for i in os.listdir('find_phone_test_images'):
    img1=Image.open('find_phone_test_images/'+i)
    img=img1.resize((224,224))
    '''img=np.array(img1).astype('float')
    img -= img.min() # shifted to 0..max
    img *= 1 / img.max()
    '''
    test_img=np.expand_dims(img,axis=0)
    #print(test_img.shape)
    test_input=model2.predict(preprocess_input(test_img))
    loc=dnn_model.predict(test_input)
    print(loc[0])
    visualize(img1,loc[0])
    plt.show()
[0.25180376 0.24587122]
50 93 110 153
[0.21660244 0.18191382]
29 76 89 136
[0.30478472 0.38343698]
95 119 155 179
[0.25425056 0.12619677]
11 94 71 154
[0.19728512 0.06156806]
-10 66 50 126
[0.06280089 0.419859  ]
106 0 166 60
[-0.06679661  0.5259008 ]
141 -62 201 -2
[0.413471   0.22051859]
41 172 101 232
[0.22357242 0.32039952]
74 79 134 139
[0.23103258 0.32572904]
76 83 136 143
[0.17806214 0.21014671]
38 57 98 117
[0.18638578 0.25292912]
52 61 112 121
[0.3775662  0.23530316]
46 155 106 215
[0.38745657 0.38839215]
96 159 156 219
[0.4196905  0.22812566]
44 175 104 235
[0.46398506 0.38495955]
95 197 155 257
[0.1338747 0.1944781]
33 35 93 95
[0.19923347 0.15037261]
19 67 79 127
[0.64766204 0.2912062 ]
64 287 124 347
[0.344916   0.36135477]
87 139 147 199
In [14]:
def test_train_dev_split2(input_data, output_data, train=0.8, dev=0.1,
                         test=0.1):
    #make seed for exact results everything
    #input_data=preprocess_input(input_data)
    input_data, output_data = shuffle(input_data, output_data, random_state=0)
    
    
    for num in range(0,len(input_data)):
        input_data[num]=np.array(input_data[num].resize((224,224))).astype('float')
        input_data[num] -= input_data[num].min() # shifted to 0..max
        input_data[num] *= 1 / input_data[num].max()
        
    
    split1 = int(train * len(input_data))
    split2 = int((train + dev) * len(input_data))
    train_input = input_data[:split1]
    dev_input = input_data[split1:split2]
    test_input = input_data[split2:]
    

    train_output = output_data[:split1]
    dev_output = output_data[split1:split2]
    test_output = output_data[split2:]
    print(train_input[0])

    return np.array(train_input),np.array(train_output),np.array(dev_input), np.array(dev_output),np.array(test_input), np.array(test_output)
In [ ]:
 
In [15]:
def r2_keras(y_true, y_pred):
    SS_res =  K.sum(K.square(y_true - y_pred)) 
    SS_tot = K.sum(K.square(y_true - K.mean(y_true))) 
    return ( 1 - SS_res/(SS_tot + K.epsilon()) )
In [ ]:
 
In [16]:
# Experiment 2 
"""
'Direct model: raw image classification'
 
"""
def phone_finder_model_cnn(input_data, output_data):

    train_in, train_out, dev_in, dev_out, test_in, test_out = test_train_dev_split2(
        input_data, output_data)

    #https://arxiv.org/pdf/1509.05371v2.pdf
    
    #print(np.array(train_in).shape)
    model=Sequential()
    model.add(Conv2D(5,110,activation='relu',input_shape=(224,224,3)))
    model.add(MaxPool2D())
    model.add(Dropout(0.4))
    
    '''model.add(Conv2D(9,55,activation='relu'))
    model.add(MaxPool2D())
    model.add(Dropout(0.4))'''
    
    model.add(Conv2D(3,27,activation='relu'))
    model.add(MaxPool2D())
    model.add(Dropout(0.4))
    
    
    model.add(Flatten())
    model.add(BatchNormalization())
              
    model.add(Dense(8,activation='relu'))
    model.add(Dropout(0.3))
              
    model.add(Dense(2,activation='linear'))
    
    print(model.input_shape,model.output_shape)
    print(model.summary())

    '''
    model.compile(
        optimizer=adam(0.001),
        loss=[focal_loss(alpha=.25, gamma=2)],
        metrics=['accuracy'])
    
    '''
    model.compile(
        optimizer=adam(0.0001),
        loss='mse',
        metrics=['mae'])
    
    early = EarlyStopping(patience=5)
    
    check = ModelCheckpoint(
        'find_phone_cnn_model.h5',
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)

    model_history=model.fit(
        train_in,
        train_out,
        batch_size=32,
        callbacks=[early, check],
        validation_data=(dev_in, dev_out),
        epochs=5000)

    loss,mse = model.evaluate(test_in, test_out)
    print("Loss: {0}    MSE: {1}".format(loss, mse))
    
    plt.plot(model_history.history['loss'])
    plt.plot(model_history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'test'], loc='upper left')
    plt.show()
    

    return model

model=phone_finder_model_cnn(input_data,labels)
[[[0.41568627 0.39607843 0.38039216]
  [0.46666667 0.44705882 0.43137255]
  [0.4627451  0.44313725 0.42745098]
  ...
  [0.00392157 0.00392157 0.00392157]
  [0.00392157 0.00392157 0.00392157]
  [0.00784314 0.00784314 0.00784314]]

 [[0.47058824 0.45098039 0.43529412]
  [0.45490196 0.43529412 0.41960784]
  [0.4627451  0.44313725 0.42745098]
  ...
  [0.00784314 0.00784314 0.00784314]
  [0.         0.         0.        ]
  [0.         0.         0.        ]]

 [[0.45490196 0.43529412 0.41960784]
  [0.45098039 0.43137255 0.41568627]
  [0.45490196 0.43529412 0.41960784]
  ...
  [0.00784314 0.00784314 0.00784314]
  [0.00392157 0.00392157 0.00392157]
  [0.         0.         0.        ]]

 ...

 [[0.3254902  0.29411765 0.20392157]
  [0.26666667 0.23529412 0.18431373]
  [0.18823529 0.16078431 0.12156863]
  ...
  [0.4        0.38823529 0.36862745]
  [0.4        0.38823529 0.36862745]
  [0.38431373 0.37254902 0.34509804]]

 [[0.34117647 0.30980392 0.21960784]
  [0.23137255 0.2        0.14901961]
  [0.21960784 0.19215686 0.15294118]
  ...
  [0.4        0.38823529 0.36862745]
  [0.4        0.38823529 0.36862745]
  [0.39215686 0.38039216 0.35294118]]

 [[0.30980392 0.27843137 0.18823529]
  [0.23921569 0.20784314 0.15686275]
  [0.20392157 0.17647059 0.1372549 ]
  ...
  [0.4        0.38823529 0.36862745]
  [0.39607843 0.38431373 0.36470588]
  [0.4        0.38823529 0.36078431]]]
(None, 224, 224, 3) (None, 2)
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
conv2d_1 (Conv2D)            (None, 115, 115, 5)       181505    
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 57, 57, 5)         0         
_________________________________________________________________
dropout_3 (Dropout)          (None, 57, 57, 5)         0         
_________________________________________________________________
conv2d_2 (Conv2D)            (None, 31, 31, 3)         10938     
_________________________________________________________________
max_pooling2d_2 (MaxPooling2 (None, 15, 15, 3)         0         
_________________________________________________________________
dropout_4 (Dropout)          (None, 15, 15, 3)         0         
_________________________________________________________________
flatten_1 (Flatten)          (None, 675)               0         
_________________________________________________________________
batch_normalization_1 (Batch (None, 675)               2700      
_________________________________________________________________
dense_4 (Dense)              (None, 8)                 5408      
_________________________________________________________________
dropout_5 (Dropout)          (None, 8)                 0         
_________________________________________________________________
dense_5 (Dense)              (None, 2)                 18        
=================================================================
Total params: 200,569
Trainable params: 199,219
Non-trainable params: 1,350
_________________________________________________________________
None
Train on 102 samples, validate on 13 samples
Epoch 1/5000
102/102 [==============================] - ETA: 14s - loss: 2.1155 - mean_absolute_error: 1.23 - ETA: 3s - loss: 2.5242 - mean_absolute_error: 1.3257 - ETA: 0s - loss: 2.5356 - mean_absolute_error: 1.298 - 9s 88ms/step - loss: 2.5366 - mean_absolute_error: 1.2998 - val_loss: 0.4133 - val_mean_absolute_error: 0.5798

Epoch 00001: val_loss improved from inf to 0.41325, saving model to find_phone_cnn_model.h5
Epoch 2/5000
102/102 [==============================] - ETA: 0s - loss: 1.6639 - mean_absolute_error: 1.071 - ETA: 0s - loss: 1.7640 - mean_absolute_error: 1.090 - ETA: 0s - loss: 1.9604 - mean_absolute_error: 1.147 - 0s 3ms/step - loss: 2.0141 - mean_absolute_error: 1.1531 - val_loss: 0.3665 - val_mean_absolute_error: 0.5235

Epoch 00002: val_loss improved from 0.41325 to 0.36654, saving model to find_phone_cnn_model.h5
Epoch 3/5000
102/102 [==============================] - ETA: 0s - loss: 2.7535 - mean_absolute_error: 1.260 - ETA: 0s - loss: 2.5379 - mean_absolute_error: 1.262 - ETA: 0s - loss: 2.5007 - mean_absolute_error: 1.238 - 0s 3ms/step - loss: 2.4500 - mean_absolute_error: 1.2307 - val_loss: 0.3444 - val_mean_absolute_error: 0.5071

Epoch 00003: val_loss improved from 0.36654 to 0.34440, saving model to find_phone_cnn_model.h5
Epoch 4/5000
102/102 [==============================] - ETA: 0s - loss: 2.2843 - mean_absolute_error: 1.259 - ETA: 0s - loss: 2.5642 - mean_absolute_error: 1.313 - ETA: 0s - loss: 2.1790 - mean_absolute_error: 1.190 - 0s 3ms/step - loss: 2.2113 - mean_absolute_error: 1.1986 - val_loss: 0.3670 - val_mean_absolute_error: 0.5243

Epoch 00004: val_loss did not improve from 0.34440
Epoch 5/5000
102/102 [==============================] - ETA: 0s - loss: 1.5093 - mean_absolute_error: 1.065 - ETA: 0s - loss: 1.8866 - mean_absolute_error: 1.103 - ETA: 0s - loss: 1.9943 - mean_absolute_error: 1.107 - 0s 3ms/step - loss: 1.9682 - mean_absolute_error: 1.1026 - val_loss: 0.4267 - val_mean_absolute_error: 0.5688

Epoch 00005: val_loss did not improve from 0.34440
Epoch 6/5000
102/102 [==============================] - ETA: 0s - loss: 1.8942 - mean_absolute_error: 1.130 - ETA: 0s - loss: 1.7241 - mean_absolute_error: 1.046 - ETA: 0s - loss: 1.9092 - mean_absolute_error: 1.125 - 0s 3ms/step - loss: 1.8540 - mean_absolute_error: 1.1085 - val_loss: 0.4286 - val_mean_absolute_error: 0.5763

Epoch 00006: val_loss did not improve from 0.34440
Epoch 7/5000
102/102 [==============================] - ETA: 0s - loss: 2.0933 - mean_absolute_error: 1.147 - ETA: 0s - loss: 1.9592 - mean_absolute_error: 1.113 - ETA: 0s - loss: 1.9852 - mean_absolute_error: 1.140 - 0s 3ms/step - loss: 1.9889 - mean_absolute_error: 1.1442 - val_loss: 0.4589 - val_mean_absolute_error: 0.5984

Epoch 00007: val_loss did not improve from 0.34440
Epoch 8/5000
102/102 [==============================] - ETA: 0s - loss: 1.5311 - mean_absolute_error: 0.954 - ETA: 0s - loss: 1.6952 - mean_absolute_error: 1.061 - ETA: 0s - loss: 2.0373 - mean_absolute_error: 1.145 - 0s 3ms/step - loss: 2.0467 - mean_absolute_error: 1.1559 - val_loss: 0.5185 - val_mean_absolute_error: 0.6447

Epoch 00008: val_loss did not improve from 0.34440
13/13 [==============================] - 0s 2ms/step
Loss: 0.44583988189697266    MSE: 0.6416805982589722
In [19]:
from PIL import Image
import matplotlib.pyplot as plt
%matplotlib inline
for i in os.listdir('find_phone_test_images'):
    img1=Image.open('find_phone_test_images/'+i).resize((224,224))
    img=np.array(img1).astype('float')
    img -= img.min() # shifted to 0..max
    img *= 1 / img.max()
    test_img=np.expand_dims(img,axis=0)
    #print(test_img.shape)
    #test_input=model2.predict(preprocess_input(test_img))
    loc=model.predict(test_img)
    print(loc[0])
    visualize(img1,loc[0])
    plt.show()
[-0.22252253 -0.4727551 ]
-135 -79 -75 -19
[-0.25442266 -0.56584257]
-156 -86 -96 -26
[ 0.08178521 -0.07145099]
-46 -12 14 48
[-0.06284944 -0.234316  ]
-82 -44 -22 16
[-0.11512952 -0.36125222]
-110 -55 -50 5
[-0.04948209 -0.23442566]
-82 -41 -22 19
[-0.23251142 -0.5049254 ]
-143 -82 -83 -22
[ 0.01024407 -0.13855934]
-61 -28 -1 32
[ 0.3602733  -0.19078279]
-72 50 -12 110
[ 0.1403128  -0.11066281]
-54 1 6 61
[ 0.21775466 -0.13930482]
-61 18 -1 78
[-0.12087735 -0.36032325]
-110 -57 -50 3
[ 0.05900535 -0.0642394 ]
-44 -17 16 43
[ 0.22330843 -0.12824185]
-58 20 2 80
[-0.10528253 -0.30882022]
-99 -53 -39 7
[-0.30555937 -0.626063  ]
-170 -98 -110 -38
[ 0.32893223 -0.17482455]
-69 43 -9 103
[-0.09071043 -0.28727838]
-94 -50 -34 10
[-0.21020977 -0.47720614]
-136 -77 -76 -17
[-0.3285564  -0.66774964]
-179 -103 -119 -43
In [ ]:
 
In [ ]:
'''#Since we have very less images, Let's try Image augmentation to get some more data but Then how to get the coordinates?

from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

datagen = ImageDataGenerator(
        rotation_range=40,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode='nearest')

for num in range(0,len(input_data)):
        input_data[num]=np.array(input_data[num])

print(np.array(input_data).shape)
i=0
for batch in datagen.flow(np.array(input_data), batch_size=1,
                          save_to_dir='augmented_imgs', save_prefix='aug', save_format='jpeg'):
    i += 1
    if i > 20:
        break  # otherwise the generator would loop indefinitely
'''
In [ ]:
 
In [ ]:
 
In [ ]: